1 /*	$OpenBSD: if_ste.c,v 1.67 2020/07/10 13:26:38 patrick Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 
51 #include <netinet/in.h>
52 #include <netinet/if_ether.h>
53 
54 #include <net/if_media.h>
55 
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59 
60 #include <uvm/uvm_extern.h>              /* for vtophys */
61 
62 #include <sys/device.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70 
71 #define STE_USEIOSPACE
72 
73 #include <dev/pci/if_stereg.h>
74 
75 int	ste_probe(struct device *, void *, void *);
76 void	ste_attach(struct device *, struct device *, void *);
77 int	ste_intr(void *);
78 void	ste_init(void *);
79 void	ste_rxeoc(struct ste_softc *);
80 void	ste_rxeof(struct ste_softc *);
81 void	ste_txeoc(struct ste_softc *);
82 void	ste_txeof(struct ste_softc *);
83 void	ste_stats_update(void *);
84 void	ste_stop(struct ste_softc *);
85 void	ste_reset(struct ste_softc *);
86 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
87 int	ste_encap(struct ste_softc *, struct ste_chain *,
88 	    struct mbuf *);
89 void	ste_start(struct ifnet *);
90 void	ste_watchdog(struct ifnet *);
91 int	ste_newbuf(struct ste_softc *,
92 	    struct ste_chain_onefrag *,
93 	    struct mbuf *);
94 int	ste_ifmedia_upd(struct ifnet *);
95 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96 
97 void	ste_mii_sync(struct ste_softc *);
98 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
99 int	ste_mii_readreg(struct ste_softc *,
100 	    struct ste_mii_frame *);
101 int	ste_mii_writereg(struct ste_softc *,
102 	    struct ste_mii_frame *);
103 int	ste_miibus_readreg(struct device *, int, int);
104 void	ste_miibus_writereg(struct device *, int, int, int);
105 void	ste_miibus_statchg(struct device *);
106 
107 int	ste_eeprom_wait(struct ste_softc *);
108 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
109 	    int, int);
110 void	ste_wait(struct ste_softc *);
111 void	ste_iff(struct ste_softc *);
112 int	ste_init_rx_list(struct ste_softc *);
113 void	ste_init_tx_list(struct ste_softc *);
114 
115 #define STE_SETBIT4(sc, reg, x)				\
116 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
117 
118 #define STE_CLRBIT4(sc, reg, x)				\
119 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
120 
121 #define STE_SETBIT2(sc, reg, x)				\
122 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
123 
124 #define STE_CLRBIT2(sc, reg, x)				\
125 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
126 
127 #define STE_SETBIT1(sc, reg, x)				\
128 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
129 
130 #define STE_CLRBIT1(sc, reg, x)				\
131 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
132 
133 
134 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
135 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
136 
137 const struct pci_matchid ste_devices[] = {
138 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
139 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
140 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
141 };
142 
143 struct cfattach ste_ca = {
144 	sizeof(struct ste_softc), ste_probe, ste_attach
145 };
146 
147 struct cfdriver ste_cd = {
148 	NULL, "ste", DV_IFNET
149 };
150 
151 /*
152  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
153  */
154 void
155 ste_mii_sync(struct ste_softc *sc)
156 {
157 	int		i;
158 
159 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
160 
161 	for (i = 0; i < 32; i++) {
162 		MII_SET(STE_PHYCTL_MCLK);
163 		DELAY(1);
164 		MII_CLR(STE_PHYCTL_MCLK);
165 		DELAY(1);
166 	}
167 }
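
/*
 * Note on the bit-bang interface used above and below: the MII
 * management pins are driven by hand through the STE_PHYCTL register.
 * MII_SET()/MII_CLR() toggle individual PHYCTL bits: STE_PHYCTL_MDIR
 * selects the data direction (host driving the line), STE_PHYCTL_MDATA
 * is the serial data bit and STE_PHYCTL_MCLK is the management clock
 * the PHY samples on.
 */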
168 
169 /*
170  * Clock a series of bits through the MII.
171  */
172 void
173 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
174 {
175 	int		i;
176 
177 	MII_CLR(STE_PHYCTL_MCLK);
178 
179 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
180 		if (bits & i) {
181 			MII_SET(STE_PHYCTL_MDATA);
182 		} else {
183 			MII_CLR(STE_PHYCTL_MDATA);
184 		}
185 		DELAY(1);
186 		MII_CLR(STE_PHYCTL_MCLK);
187 		DELAY(1);
188 		MII_SET(STE_PHYCTL_MCLK);
189 	}
190 }
191 
192 /*
193  * Read a PHY register through the MII.
194  */
195 int
196 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
197 {
198 	int		ack, i, s;
199 
200 	s = splnet();
201 
202 	/*
203 	 * Set up frame for RX.
204 	 */
205 	frame->mii_stdelim = STE_MII_STARTDELIM;
206 	frame->mii_opcode = STE_MII_READOP;
207 	frame->mii_turnaround = 0;
208 	frame->mii_data = 0;
209 
210 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
211 	/*
212  	 * Turn on data xmit.
213 	 */
214 	MII_SET(STE_PHYCTL_MDIR);
215 
216 	ste_mii_sync(sc);
217 
218 	/*
219 	 * Send command/address info.
220 	 */
221 	ste_mii_send(sc, frame->mii_stdelim, 2);
222 	ste_mii_send(sc, frame->mii_opcode, 2);
223 	ste_mii_send(sc, frame->mii_phyaddr, 5);
224 	ste_mii_send(sc, frame->mii_regaddr, 5);
225 
226 	/* Turn off xmit. */
227 	MII_CLR(STE_PHYCTL_MDIR);
228 
229 	/* Idle bit */
230 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
231 	DELAY(1);
232 	MII_SET(STE_PHYCTL_MCLK);
233 	DELAY(1);
234 
235 	/* Check for ack */
236 	MII_CLR(STE_PHYCTL_MCLK);
237 	DELAY(1);
238 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
239 	MII_SET(STE_PHYCTL_MCLK);
240 	DELAY(1);
241 
242 	/*
243 	 * Now try reading data bits. If the ack failed, we still
244 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
245 	 */
246 	if (ack) {
247 		for(i = 0; i < 16; i++) {
248 			MII_CLR(STE_PHYCTL_MCLK);
249 			DELAY(1);
250 			MII_SET(STE_PHYCTL_MCLK);
251 			DELAY(1);
252 		}
253 		goto fail;
254 	}
255 
256 	for (i = 0x8000; i; i >>= 1) {
257 		MII_CLR(STE_PHYCTL_MCLK);
258 		DELAY(1);
259 		if (!ack) {
260 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
261 				frame->mii_data |= i;
262 			DELAY(1);
263 		}
264 		MII_SET(STE_PHYCTL_MCLK);
265 		DELAY(1);
266 	}
267 
268 fail:
269 
270 	MII_CLR(STE_PHYCTL_MCLK);
271 	DELAY(1);
272 	MII_SET(STE_PHYCTL_MCLK);
273 	DELAY(1);
274 
275 	splx(s);
276 
277 	if (ack)
278 		return(1);
279 	return(0);
280 }
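
/*
 * The frames clocked out by ste_mii_readreg()/ste_mii_writereg() follow
 * the standard IEEE 802.3 clause 22 MDIO format: a 2-bit start
 * delimiter, a 2-bit opcode (read or write), a 5-bit PHY address, a
 * 5-bit register address, a 2-bit turnaround and 16 data bits.  For a
 * read, the turnaround and data phases are driven by the PHY, which is
 * why the code releases STE_PHYCTL_MDIR before sampling the ack and
 * data bits.
 */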
281 
282 /*
283  * Write to a PHY register through the MII.
284  */
285 int
286 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
287 {
288 	int		s;
289 
290 	s = splnet();
291 	/*
292 	 * Set up frame for TX.
293 	 */
294 
295 	frame->mii_stdelim = STE_MII_STARTDELIM;
296 	frame->mii_opcode = STE_MII_WRITEOP;
297 	frame->mii_turnaround = STE_MII_TURNAROUND;
298 
299 	/*
300  	 * Turn on data output.
301 	 */
302 	MII_SET(STE_PHYCTL_MDIR);
303 
304 	ste_mii_sync(sc);
305 
306 	ste_mii_send(sc, frame->mii_stdelim, 2);
307 	ste_mii_send(sc, frame->mii_opcode, 2);
308 	ste_mii_send(sc, frame->mii_phyaddr, 5);
309 	ste_mii_send(sc, frame->mii_regaddr, 5);
310 	ste_mii_send(sc, frame->mii_turnaround, 2);
311 	ste_mii_send(sc, frame->mii_data, 16);
312 
313 	/* Idle bit. */
314 	MII_SET(STE_PHYCTL_MCLK);
315 	DELAY(1);
316 	MII_CLR(STE_PHYCTL_MCLK);
317 	DELAY(1);
318 
319 	/*
320 	 * Turn off xmit.
321 	 */
322 	MII_CLR(STE_PHYCTL_MDIR);
323 
324 	splx(s);
325 
326 	return(0);
327 }
328 
329 int
330 ste_miibus_readreg(struct device *self, int phy, int reg)
331 {
332 	struct ste_softc	*sc = (struct ste_softc *)self;
333 	struct ste_mii_frame	frame;
334 
335 	if (sc->ste_one_phy && phy != 0)
336 		return (0);
337 
338 	bzero(&frame, sizeof(frame));
339 
340 	frame.mii_phyaddr = phy;
341 	frame.mii_regaddr = reg;
342 	ste_mii_readreg(sc, &frame);
343 
344 	return(frame.mii_data);
345 }
346 
347 void
348 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
349 {
350 	struct ste_softc	*sc = (struct ste_softc *)self;
351 	struct ste_mii_frame	frame;
352 
353 	bzero(&frame, sizeof(frame));
354 
355 	frame.mii_phyaddr = phy;
356 	frame.mii_regaddr = reg;
357 	frame.mii_data = data;
358 
359 	ste_mii_writereg(sc, &frame);
360 }
361 
362 void
363 ste_miibus_statchg(struct device *self)
364 {
365 	struct ste_softc	*sc = (struct ste_softc *)self;
366 	struct mii_data		*mii;
367 	int fdx, fcur;
368 
369 	mii = &sc->sc_mii;
370 
371 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
372 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
373 
374 	if ((fcur && fdx) || (! fcur && ! fdx))
375 		return;
376 
377 	STE_SETBIT4(sc, STE_DMACTL,
378 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
379 	ste_wait(sc);
380 
381 	if (fdx)
382 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
383 	else
384 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
385 
386 	STE_SETBIT4(sc, STE_DMACTL,
387 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
388 }
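
/*
 * Note: a duplex change is only applied while both DMA engines are
 * stalled (the STALL/UNSTALL sequence above); flipping
 * STE_MACCTL0_FULLDUPLEX while DMA is active is presumably unsafe on
 * this chip.
 */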
389 
390 int
391 ste_ifmedia_upd(struct ifnet *ifp)
392 {
393 	struct ste_softc	*sc;
394 	struct mii_data		*mii;
395 
396 	sc = ifp->if_softc;
397 	mii = &sc->sc_mii;
398 	sc->ste_link = 0;
399 	if (mii->mii_instance) {
400 		struct mii_softc	*miisc;
401 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
402 			mii_phy_reset(miisc);
403 	}
404 	mii_mediachg(mii);
405 
406 	return(0);
407 }
408 
409 void
410 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
411 {
412 	struct ste_softc	*sc;
413 	struct mii_data		*mii;
414 
415 	sc = ifp->if_softc;
416 	mii = &sc->sc_mii;
417 
418 	mii_pollstat(mii);
419 	ifmr->ifm_active = mii->mii_media_active;
420 	ifmr->ifm_status = mii->mii_media_status;
421 }
422 
423 void
424 ste_wait(struct ste_softc *sc)
425 {
426 	int		i;
427 
428 	for (i = 0; i < STE_TIMEOUT; i++) {
429 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
430 			break;
431 	}
432 
433 	if (i == STE_TIMEOUT)
434 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
435 }
436 
437 /*
438  * The EEPROM is slow: give it time to come ready after issuing
439  * it a command.
440  */
441 int
442 ste_eeprom_wait(struct ste_softc *sc)
443 {
444 	int		i;
445 
446 	DELAY(1000);
447 
448 	for (i = 0; i < 100; i++) {
449 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
450 			DELAY(1000);
451 		else
452 			break;
453 	}
454 
455 	if (i == 100) {
456 		printf("%s: eeprom failed to come ready\n",
457 		    sc->sc_dev.dv_xname);
458 		return(1);
459 	}
460 
461 	return(0);
462 }
463 
464 /*
465  * Read a sequence of words from the EEPROM. Note that ethernet address
466  * data is stored in the EEPROM in network byte order.
467  */
468 int
469 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
470 {
471 	int			err = 0, i;
472 	u_int16_t		word = 0, *ptr;
473 
474 	if (ste_eeprom_wait(sc))
475 		return(1);
476 
477 	for (i = 0; i < cnt; i++) {
478 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
479 		err = ste_eeprom_wait(sc);
480 		if (err)
481 			break;
482 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
483 		ptr = (u_int16_t *)(dest + (i * 2));
484 		if (swap)
485 			*ptr = ntohs(word);
486 		else
487 			*ptr = word;
488 	}
489 
490 	return(err ? 1 : 0);
491 }
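
/*
 * Example use: ste_attach() reads the station address as three
 * consecutive 16-bit words starting at STE_EEADDR_NODE0, i.e.
 * ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, STE_EEADDR_NODE0,
 * 3, 0), with no byte swapping since the EEPROM already stores it in
 * network byte order.
 */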
492 
493 void
494 ste_iff(struct ste_softc *sc)
495 {
496 	struct ifnet		*ifp = &sc->arpcom.ac_if;
497 	struct arpcom		*ac = &sc->arpcom;
498 	struct ether_multi	*enm;
499 	struct ether_multistep	step;
500 	u_int32_t		rxmode, hashes[2];
501 	int			h = 0;
502 
503 	rxmode = CSR_READ_1(sc, STE_RX_MODE);
504 	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
505 	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
506 	    STE_RXMODE_UNICAST);
507 	bzero(hashes, sizeof(hashes));
508 	ifp->if_flags &= ~IFF_ALLMULTI;
509 
510 	/*
511 	 * Always accept broadcast frames.
512 	 * Always accept frames destined to our station address.
513 	 */
514 	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
515 
516 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
517 		ifp->if_flags |= IFF_ALLMULTI;
518 		rxmode |= STE_RXMODE_ALLMULTI;
519 		if (ifp->if_flags & IFF_PROMISC)
520 			rxmode |= STE_RXMODE_PROMISC;
521 	} else {
522 		rxmode |= STE_RXMODE_MULTIHASH;
523 
524 		/* now program new ones */
525 		ETHER_FIRST_MULTI(step, ac, enm);
526 		while (enm != NULL) {
527 			h = ether_crc32_be(enm->enm_addrlo,
528 			    ETHER_ADDR_LEN) & 0x3F;
529 
530 			if (h < 32)
531 				hashes[0] |= (1 << h);
532 			else
533 				hashes[1] |= (1 << (h - 32));
534 
535 			ETHER_NEXT_MULTI(step, enm);
536 		}
537 	}
538 
539 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
540 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
541 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
542 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
543 	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
544 }
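
/*
 * The multicast filter is a 64-bit hash table spread across the four
 * 16-bit STE_MAR0..STE_MAR3 registers.  The low six bits of the
 * big-endian CRC of the address select one of the 64 bits: for
 * example, a CRC whose low six bits are 42 sets bit 10 of hashes[1],
 * which is written out as bit 10 of STE_MAR2.
 */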
545 
546 int
547 ste_intr(void *xsc)
548 {
549 	struct ste_softc	*sc;
550 	struct ifnet		*ifp;
551 	u_int16_t		status;
552 	int			claimed = 0;
553 
554 	sc = xsc;
555 	ifp = &sc->arpcom.ac_if;
556 
557 	/* See if this is really our interrupt. */
558 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
559 		return claimed;
560 
561 	for (;;) {
562 		status = CSR_READ_2(sc, STE_ISR_ACK);
563 
564 		if (!(status & STE_INTRS))
565 			break;
566 
567 		claimed = 1;
568 
569 		if (status & STE_ISR_RX_DMADONE) {
570 			ste_rxeoc(sc);
571 			ste_rxeof(sc);
572 		}
573 
574 		if (status & STE_ISR_TX_DMADONE)
575 			ste_txeof(sc);
576 
577 		if (status & STE_ISR_TX_DONE)
578 			ste_txeoc(sc);
579 
580 		if (status & STE_ISR_STATS_OFLOW) {
581 			timeout_del(&sc->sc_stats_tmo);
582 			ste_stats_update(sc);
583 		}
584 
585 		if (status & STE_ISR_LINKEVENT)
586 			mii_pollstat(&sc->sc_mii);
587 
588 		if (status & STE_ISR_HOSTERR)
589 			ste_init(sc);
590 	}
591 
592 	/* Re-enable interrupts */
593 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
594 
595 	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
596 		ste_start(ifp);
597 
598 	return claimed;
599 }
600 
601 void
602 ste_rxeoc(struct ste_softc *sc)
603 {
604 	struct ste_chain_onefrag *cur_rx;
605 
606 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
607 		cur_rx = sc->ste_cdata.ste_rx_head;
608 		do {
609 			cur_rx = cur_rx->ste_next;
610 			/* If the ring is empty, just return. */
611 			if (cur_rx == sc->ste_cdata.ste_rx_head)
612 				return;
613 		} while (cur_rx->ste_ptr->ste_status == 0);
614 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
615 			/* We've fallen behind the chip: catch it. */
616 			sc->ste_cdata.ste_rx_head = cur_rx;
617 		}
618 	}
619 }
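
/*
 * ste_rxeoc() resynchronizes the software RX head with the chip: if
 * the descriptor we expect to complete next still has a zero status
 * but a later descriptor in the ring has completed, the head pointer
 * is advanced to that descriptor so ste_rxeof() does not stall behind
 * the hardware.
 */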
620 
621 /*
622  * A frame has been uploaded: pass the resulting mbuf chain up to
623  * the higher level protocols.
624  */
625 void
626 ste_rxeof(struct ste_softc *sc)
627 {
628 	struct mbuf		*m;
629 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
630 	struct ifnet		*ifp;
631 	struct ste_chain_onefrag	*cur_rx;
632 	int			total_len = 0, count = 0;
633 	u_int32_t		rxstat;
634 
635 	ifp = &sc->arpcom.ac_if;
636 
637 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
638 	      & STE_RXSTAT_DMADONE) {
639 		if ((STE_RX_LIST_CNT - count) < 3)
640 			break;
641 
642 		cur_rx = sc->ste_cdata.ste_rx_head;
643 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
644 
645 		/*
646 		 * If an error occurs, update stats, clear the
647 		 * status word and leave the mbuf cluster in place:
648 		 * it should simply get re-used next time this descriptor
649 	 	 * comes up in the ring.
650 		 */
651 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
652 			ifp->if_ierrors++;
653 			cur_rx->ste_ptr->ste_status = 0;
654 			continue;
655 		}
656 
657 		/*
658 		 * If the error bit was not set, the upload complete
659 		 * bit should be set, which means we have a valid packet.
660 		 * If not, something truly strange has happened.
661 		 */
662 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
663 			printf("%s: bad receive status -- packet dropped\n",
664 				sc->sc_dev.dv_xname);
665 			ifp->if_ierrors++;
666 			cur_rx->ste_ptr->ste_status = 0;
667 			continue;
668 		}
669 
670 		/* No errors; receive the packet. */
671 		m = cur_rx->ste_mbuf;
672 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
673 
674 		/*
675 		 * Try to conjure up a new mbuf cluster. If that
676 		 * fails, it means we have an out of memory condition and
677 		 * should leave the buffer in place and continue. This will
678 		 * result in a lost packet, but there's little else we
679 		 * can do in this situation.
680 		 */
681 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
682 			ifp->if_ierrors++;
683 			cur_rx->ste_ptr->ste_status = 0;
684 			continue;
685 		}
686 
687 		m->m_pkthdr.len = m->m_len = total_len;
688 
689 		ml_enqueue(&ml, m);
690 
691 		cur_rx->ste_ptr->ste_status = 0;
692 		count++;
693 	}
694 
695 	if_input(ifp, &ml);
696 }
697 
698 void
699 ste_txeoc(struct ste_softc *sc)
700 {
701 	u_int8_t		txstat;
702 	struct ifnet		*ifp;
703 
704 	ifp = &sc->arpcom.ac_if;
705 
706 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
707 	    STE_TXSTATUS_TXDONE) {
708 		if (txstat & STE_TXSTATUS_UNDERRUN ||
709 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
710 		    txstat & STE_TXSTATUS_RECLAIMERR) {
711 			ifp->if_oerrors++;
712 			printf("%s: transmission error: %x\n",
713 			    sc->sc_dev.dv_xname, txstat);
714 
715 			ste_init(sc);
716 
717 			if (txstat & STE_TXSTATUS_UNDERRUN &&
718 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
719 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
720 				printf("%s: tx underrun, increasing tx"
721 				    " start threshold to %d bytes\n",
722 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
723 			}
724 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
725 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
726 			    (ETHER_MAX_DIX_LEN >> 4));
727 			ste_init(sc);
728 		}
729 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
730 	}
731 }
732 
733 void
734 ste_txeof(struct ste_softc *sc)
735 {
736 	struct ste_chain	*cur_tx = NULL;
737 	struct ifnet		*ifp;
738 	int			idx;
739 
740 	ifp = &sc->arpcom.ac_if;
741 
742 	idx = sc->ste_cdata.ste_tx_cons;
743 	while(idx != sc->ste_cdata.ste_tx_prod) {
744 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
745 
746 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
747 			break;
748 
749 		m_freem(cur_tx->ste_mbuf);
750 		cur_tx->ste_mbuf = NULL;
751 		ifq_clr_oactive(&ifp->if_snd);
752 
753 		STE_INC(idx, STE_TX_LIST_CNT);
754 	}
755 
756 	sc->ste_cdata.ste_tx_cons = idx;
757 	if (idx == sc->ste_cdata.ste_tx_prod)
758 		ifp->if_timer = 0;
759 }
760 
761 void
762 ste_stats_update(void *xsc)
763 {
764 	struct ste_softc	*sc;
765 	struct ifnet		*ifp;
766 	struct mii_data		*mii;
767 	int			s;
768 
769 	s = splnet();
770 
771 	sc = xsc;
772 	ifp = &sc->arpcom.ac_if;
773 	mii = &sc->sc_mii;
774 
775 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
776 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
777 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
778 
779 	if (!sc->ste_link) {
780 		mii_pollstat(mii);
781 		if (mii->mii_media_status & IFM_ACTIVE &&
782 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
783 			sc->ste_link++;
784 			/*
785 			 * We don't get a callback on re-init, so do it here;
786 			 * otherwise we get stuck in the wrong link state.
787 			 */
788 			ste_miibus_statchg((struct device *)sc);
789 			if (!ifq_empty(&ifp->if_snd))
790 				ste_start(ifp);
791 		}
792 	}
793 
794 	timeout_add_sec(&sc->sc_stats_tmo, 1);
795 	splx(s);
796 }
797 
798 /*
799  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
800  * IDs against our list and return a device name if we find a match.
801  */
802 int
803 ste_probe(struct device *parent, void *match, void *aux)
804 {
805 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
806 	    nitems(ste_devices)));
807 }
808 
809 /*
810  * Attach the interface. Allocate softc structures, do ifmedia
811  * setup and ethernet/BPF attach.
812  */
813 void
814 ste_attach(struct device *parent, struct device *self, void *aux)
815 {
816 	const char		*intrstr = NULL;
817 	struct ste_softc	*sc = (struct ste_softc *)self;
818 	struct pci_attach_args	*pa = aux;
819 	pci_chipset_tag_t	pc = pa->pa_pc;
820 	pci_intr_handle_t	ih;
821 	struct ifnet		*ifp;
822 	bus_size_t		size;
823 
824 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
825 
826 	/*
827 	 * Only use one PHY, since this chip reports multiple ones.
828 	 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
829 	 * it is at 0 & 1.  The quirk below applies to rev 0x12.
830 	 */
831 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
832 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
833 	    PCI_REVISION(pa->pa_class) == 0x12)
834 		sc->ste_one_phy = 1;
835 
836 	/*
837 	 * Map control/status registers.
838 	 */
839 
840 #ifdef STE_USEIOSPACE
841 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
842 	    PCI_MAPREG_TYPE_IO, 0,
843 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
844 		printf(": can't map i/o space\n");
845 		return;
846 	}
847 #else
848 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
849 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
850 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
851 		printf(": can't map mem space\n");
852 		return;
853 	}
854 #endif
855 
856 	/* Allocate interrupt */
857 	if (pci_intr_map(pa, &ih)) {
858 		printf(": couldn't map interrupt\n");
859 		goto fail_1;
860 	}
861 	intrstr = pci_intr_string(pc, ih);
862 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
863 	    self->dv_xname);
864 	if (sc->sc_ih == NULL) {
865 		printf(": couldn't establish interrupt");
866 		if (intrstr != NULL)
867 			printf(" at %s", intrstr);
868 		printf("\n");
869 		goto fail_1;
870 	}
871 	printf(": %s", intrstr);
872 
873 	/* Reset the adapter. */
874 	ste_reset(sc);
875 
876 	/*
877 	 * Get station address from the EEPROM.
878 	 */
879 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
880 	    STE_EEADDR_NODE0, 3, 0)) {
881 		printf(": failed to read station address\n");
882 		goto fail_2;
883 	}
884 
885 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
886 
887 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
888 	    M_DEVBUF, M_DONTWAIT);
889 	if (sc->ste_ldata_ptr == NULL) {
890 		printf(": no memory for list buffers!\n");
891 		goto fail_2;
892 	}
893 
894 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
895 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
896 
897 	ifp = &sc->arpcom.ac_if;
898 	ifp->if_softc = sc;
899 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
900 	ifp->if_ioctl = ste_ioctl;
901 	ifp->if_start = ste_start;
902 	ifp->if_watchdog = ste_watchdog;
903 	ifq_set_maxlen(&ifp->if_snd, STE_TX_LIST_CNT - 1);
904 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
905 	ifp->if_capabilities = IFCAP_VLAN_MTU;
906 
907 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
908 
909 	sc->sc_mii.mii_ifp = ifp;
910 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
911 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
912 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
913 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
914 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
915 	    0);
916 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
917 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
918 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
919 	} else
920 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
921 
922 	/*
923 	 * Call MI attach routines.
924 	 */
925 	if_attach(ifp);
926 	ether_ifattach(ifp);
927 	return;
928 
929 fail_2:
930 	pci_intr_disestablish(pc, sc->sc_ih);
931 
932 fail_1:
933 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
934 }
935 
936 int
937 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
938 {
939 	struct mbuf		*m_new = NULL;
940 
941 	if (m == NULL) {
942 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
943 		if (m_new == NULL)
944 			return(ENOBUFS);
945 		MCLGET(m_new, M_DONTWAIT);
946 		if (!(m_new->m_flags & M_EXT)) {
947 			m_freem(m_new);
948 			return(ENOBUFS);
949 		}
950 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
951 	} else {
952 		m_new = m;
953 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
954 		m_new->m_data = m_new->m_ext.ext_buf;
955 	}
956 
957 	m_adj(m_new, ETHER_ALIGN);
958 
959 	c->ste_mbuf = m_new;
960 	c->ste_ptr->ste_status = 0;
961 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
962 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
963 
964 	return(0);
965 }
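
/*
 * Note: the m_adj(m_new, ETHER_ALIGN) above shifts the payload by two
 * bytes so that the IP header following the 14-byte Ethernet header
 * ends up 32-bit aligned.  The single RX fragment is sized for a
 * maximum VLAN-tagged frame and marked STE_FRAG_LAST, since every
 * receive buffer is one cluster-backed fragment.
 */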
966 
967 int
968 ste_init_rx_list(struct ste_softc *sc)
969 {
970 	struct ste_chain_data	*cd;
971 	struct ste_list_data	*ld;
972 	int			i;
973 
974 	cd = &sc->ste_cdata;
975 	ld = sc->ste_ldata;
976 
977 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
978 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
979 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
980 			return(ENOBUFS);
981 		if (i == (STE_RX_LIST_CNT - 1)) {
982 			cd->ste_rx_chain[i].ste_next =
983 			    &cd->ste_rx_chain[0];
984 			ld->ste_rx_list[i].ste_next =
985 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
986 		} else {
987 			cd->ste_rx_chain[i].ste_next =
988 			    &cd->ste_rx_chain[i + 1];
989 			ld->ste_rx_list[i].ste_next =
990 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
991 		}
992 		ld->ste_rx_list[i].ste_status = 0;
993 	}
994 
995 	cd->ste_rx_head = &cd->ste_rx_chain[0];
996 
997 	return(0);
998 }
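
/*
 * The RX descriptors form a circular, singly linked list: each
 * ste_next field holds the physical address of the next descriptor
 * (the last one points back to the first), while ste_rx_chain[]
 * mirrors the same links with kernel virtual pointers.  Physical
 * addresses are obtained with vtophys(); this driver has not been
 * converted to bus_dma(9).
 */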
999 
1000 void
1001 ste_init_tx_list(struct ste_softc *sc)
1002 {
1003 	struct ste_chain_data	*cd;
1004 	struct ste_list_data	*ld;
1005 	int			i;
1006 
1007 	cd = &sc->ste_cdata;
1008 	ld = sc->ste_ldata;
1009 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1010 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1011 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1012 		if (i == (STE_TX_LIST_CNT - 1))
1013 			cd->ste_tx_chain[i].ste_next =
1014 			    &cd->ste_tx_chain[0];
1015 		else
1016 			cd->ste_tx_chain[i].ste_next =
1017 			    &cd->ste_tx_chain[i + 1];
1018 	}
1019 
1020 	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1021 
1022 	cd->ste_tx_prod = 0;
1023 	cd->ste_tx_cons = 0;
1024 }
1025 
1026 void
1027 ste_init(void *xsc)
1028 {
1029 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1030 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1031 	struct mii_data		*mii;
1032 	int			i, s;
1033 
1034 	s = splnet();
1035 
1036 	ste_stop(sc);
1037 	/* Reset the chip to a known state. */
1038 	ste_reset(sc);
1039 
1040 	mii = &sc->sc_mii;
1041 
1042 	/* Init our MAC address */
1043 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1044 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1045 	}
1046 
1047 	/* Init RX list */
1048 	if (ste_init_rx_list(sc) == ENOBUFS) {
1049 		printf("%s: initialization failed: no "
1050 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1051 		ste_stop(sc);
1052 		splx(s);
1053 		return;
1054 	}
1055 
1056 	/* Set RX polling interval */
1057 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1058 
1059 	/* Init TX descriptors */
1060 	ste_init_tx_list(sc);
1061 
1062 	/* Set the TX freethresh value */
1063 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1064 
1065 	/* Set the TX start threshold for best performance. */
1066 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1067 
1068 	/* Set the TX reclaim threshold. */
1069 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1070 
1071 	/* Program promiscuous mode and multicast filters. */
1072 	ste_iff(sc);
1073 
1074 	/* Load the address of the RX list. */
1075 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1076 	ste_wait(sc);
1077 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1078 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1079 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1080 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1081 
1082 	/* Set TX polling interval (defer until we TX first packet) */
1083 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1084 
1085 	/* Load address of the TX list */
1086 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1087 	ste_wait(sc);
1088 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1089 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1090 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1091 	ste_wait(sc);
1092 	sc->ste_tx_prev = NULL;
1093 
1094 	/* Enable receiver and transmitter */
1095 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1096 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1097 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1098 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1099 
1100 	/* Enable stats counters. */
1101 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1102 
1103 	/* Enable interrupts. */
1104 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1105 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1106 
1107 	/* Accept VLAN length packets */
1108 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1109 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1110 
1111 	ste_ifmedia_upd(ifp);
1112 
1113 	ifp->if_flags |= IFF_RUNNING;
1114 	ifq_clr_oactive(&ifp->if_snd);
1115 
1116 	splx(s);
1117 
1118 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1119 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1120 }
1121 
1122 void
1123 ste_stop(struct ste_softc *sc)
1124 {
1125 	int			i;
1126 	struct ifnet		*ifp;
1127 
1128 	ifp = &sc->arpcom.ac_if;
1129 
1130 	timeout_del(&sc->sc_stats_tmo);
1131 
1132 	ifp->if_flags &= ~IFF_RUNNING;
1133 	ifq_clr_oactive(&ifp->if_snd);
1134 
1135 	CSR_WRITE_2(sc, STE_IMR, 0);
1136 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1137 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1138 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1139 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1140 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1141 	ste_wait(sc);
1142 	/*
1143 	 * Try really hard to stop the RX engine; otherwise, under heavy
1144 	 * RX traffic, the chip will write into de-allocated memory.
1145 	 */
1146 	ste_reset(sc);
1147 
1148 	sc->ste_link = 0;
1149 
1150 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1151 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1152 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1153 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1154 		}
1155 	}
1156 
1157 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1158 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1159 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1160 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1161 		}
1162 	}
1163 
1164 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1165 }
1166 
1167 void
1168 ste_reset(struct ste_softc *sc)
1169 {
1170 	int		i;
1171 
1172 	STE_SETBIT4(sc, STE_ASICCTL,
1173 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1174 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1175 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1176 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1177 	    STE_ASICCTL_EXTRESET_RESET);
1178 
1179 	DELAY(100000);
1180 
1181 	for (i = 0; i < STE_TIMEOUT; i++) {
1182 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1183 			break;
1184 	}
1185 
1186 	if (i == STE_TIMEOUT)
1187 		printf("%s: global reset never completed\n",
1188 		    sc->sc_dev.dv_xname);
1189 }
1190 
1191 int
1192 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1193 {
1194 	struct ste_softc	*sc = ifp->if_softc;
1195 	struct ifreq		*ifr = (struct ifreq *) data;
1196 	int			s, error = 0;
1197 
1198 	s = splnet();
1199 
1200 	switch(command) {
1201 	case SIOCSIFADDR:
1202 		ifp->if_flags |= IFF_UP;
1203 		if (!(ifp->if_flags & IFF_RUNNING))
1204 			ste_init(sc);
1205 		break;
1206 
1207 	case SIOCSIFFLAGS:
1208 		if (ifp->if_flags & IFF_UP) {
1209 			if (ifp->if_flags & IFF_RUNNING)
1210 				error = ENETRESET;
1211 			else {
1212 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1213 				ste_init(sc);
1214 			}
1215 		} else {
1216 			if (ifp->if_flags & IFF_RUNNING)
1217 				ste_stop(sc);
1218 		}
1219 		break;
1220 
1221 	case SIOCGIFMEDIA:
1222 	case SIOCSIFMEDIA:
1223 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1224 		break;
1225 
1226 	default:
1227 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1228 	}
1229 
1230 	if (error == ENETRESET) {
1231 		if (ifp->if_flags & IFF_RUNNING)
1232 			ste_iff(sc);
1233 		error = 0;
1234 	}
1235 
1236 	splx(s);
1237 	return(error);
1238 }
1239 
1240 int
1241 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1242 {
1243 	int			frag = 0;
1244 	struct ste_frag		*f = NULL;
1245 	struct mbuf		*m;
1246 	struct ste_desc		*d;
1247 
1248 	d = c->ste_ptr;
1249 	d->ste_ctl = 0;
1250 
1251 encap_retry:
1252 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1253 		if (m->m_len != 0) {
1254 			if (frag == STE_MAXFRAGS)
1255 				break;
1256 			f = &d->ste_frags[frag];
1257 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1258 			f->ste_len = m->m_len;
1259 			frag++;
1260 		}
1261 	}
1262 
1263 	if (m != NULL) {
1264 		struct mbuf *mn;
1265 
1266 		/*
1267 		 * We ran out of segments. We have to recopy this
1268 		 * mbuf chain first. Bail out if we can't get the
1269 		 * new buffers.
1270 		 */
1271 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1272 		if (mn == NULL) {
1273 			m_freem(m_head);
1274 			return ENOMEM;
1275 		}
1276 		if (m_head->m_pkthdr.len > MHLEN) {
1277 			MCLGET(mn, M_DONTWAIT);
1278 			if ((mn->m_flags & M_EXT) == 0) {
1279 				m_freem(mn);
1280 				m_freem(m_head);
1281 				return ENOMEM;
1282 			}
1283 		}
1284 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1285 			   mtod(mn, caddr_t));
1286 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1287 		m_freem(m_head);
1288 		m_head = mn;
1289 		goto encap_retry;
1290 	}
1291 
1292 	c->ste_mbuf = m_head;
1293 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1294 	d->ste_ctl = 1;
1295 
1296 	return(0);
1297 }
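
/*
 * ste_encap() maps an outgoing mbuf chain onto at most STE_MAXFRAGS
 * fragment slots of one TX descriptor.  If the chain has more
 * non-empty mbufs than that, it is copied into a single mbuf (a
 * cluster when the packet exceeds MHLEN) and the mapping is retried,
 * so a packet always fits in one descriptor.
 */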
1298 
1299 void
1300 ste_start(struct ifnet *ifp)
1301 {
1302 	struct ste_softc	*sc;
1303 	struct mbuf		*m_head = NULL;
1304 	struct ste_chain	*cur_tx;
1305 	int			idx;
1306 
1307 	sc = ifp->if_softc;
1308 
1309 	if (!sc->ste_link)
1310 		return;
1311 
1312 	if (ifq_is_oactive(&ifp->if_snd))
1313 		return;
1314 
1315 	idx = sc->ste_cdata.ste_tx_prod;
1316 
1317 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1318 		/*
1319 		 * We cannot re-use the last (free) descriptor;
1320 		 * the chip may not have read its ste_next yet.
1321 		 */
1322 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1323 		    sc->ste_cdata.ste_tx_cons) {
1324 			ifq_set_oactive(&ifp->if_snd);
1325 			break;
1326 		}
1327 
1328 		m_head = ifq_dequeue(&ifp->if_snd);
1329 		if (m_head == NULL)
1330 			break;
1331 
1332 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1333 
1334 		if (ste_encap(sc, cur_tx, m_head) != 0)
1335 			break;
1336 
1337 		cur_tx->ste_ptr->ste_next = 0;
1338 
1339 		if (sc->ste_tx_prev == NULL) {
1340 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1341 			/* Load address of the TX list */
1342 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1343 			ste_wait(sc);
1344 
1345 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1346 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1347 
1348 			/* Set TX polling interval to start TX engine */
1349 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1350 
1351 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1352 			ste_wait(sc);
1353 		} else {
1354 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1355 			sc->ste_tx_prev->ste_ptr->ste_next
1356 				= cur_tx->ste_phys;
1357 		}
1358 
1359 		sc->ste_tx_prev = cur_tx;
1360 
1361 #if NBPFILTER > 0
1362 		/*
1363 		 * If there's a BPF listener, bounce a copy of this frame
1364 		 * to him.
1365 	 	 */
1366 		if (ifp->if_bpf)
1367 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1368 			    BPF_DIRECTION_OUT);
1369 #endif
1370 
1371 		STE_INC(idx, STE_TX_LIST_CNT);
1372 		ifp->if_timer = 5;
1373 	}
1374 	sc->ste_cdata.ste_tx_prod = idx;
1375 }
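
/*
 * TX queueing works as follows: the first frame after (re)init loads
 * STE_TX_DMALIST_PTR with the physical address of the list and
 * programs a non-zero TX poll period to start the engine; later
 * frames are simply appended by pointing the previous descriptor's
 * ste_next at the new descriptor's ste_phys address, which the chip
 * presumably picks up on its next poll.
 */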
1376 
1377 void
1378 ste_watchdog(struct ifnet *ifp)
1379 {
1380 	struct ste_softc	*sc;
1381 
1382 	sc = ifp->if_softc;
1383 
1384 	ifp->if_oerrors++;
1385 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1386 
1387 	ste_txeoc(sc);
1388 	ste_txeof(sc);
1389 	ste_rxeoc(sc);
1390 	ste_rxeof(sc);
1391 	ste_init(sc);
1392 
1393 	if (!ifq_empty(&ifp->if_snd))
1394 		ste_start(ifp);
1395 }
1396