xref: /openbsd-src/sys/dev/pci/if_wb.c (revision f1dd7b858388b4a23f4f67a4957ec5ff656ebbe8)
1 /*	$OpenBSD: if_wb.c,v 1.73 2021/03/05 12:40:13 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
35  */
36 
37 /*
38  * Winbond fast ethernet PCI NIC driver
39  *
40  * Supports various cheap network adapters based on the Winbond W89C840F
41  * fast ethernet controller chip. This includes adapters manufactured by
42  * Winbond itself and some made by Linksys.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The Winbond W89C840F chip is a bus master; in some ways it resembles
51  * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
52  * one major difference which is that while the registers do many of
53  * the same things as a tulip adapter, the offsets are different: where
54  * tulip registers are typically spaced 8 bytes apart, the Winbond
55  * registers are spaced 4 bytes apart. The receiver filter is also
56  * programmed differently.
57  *
58  * Like the tulip, the Winbond chip uses small descriptors containing
59  * a status word, a control word and 32-bit areas that can either be used
60  * to point to two external data blocks, or to point to a single block
61  * and another descriptor in a linked list. Descriptors can be grouped
62  * together in blocks to form fixed length rings or can be chained
63  * together in linked lists. A single packet may be spread out over
64  * several descriptors if necessary.
65  *
 * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single mbuf cluster buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
70  *
71  * For transmission, the driver creates a linked list of 'super descriptors'
72  * which each contain several individual descriptors linked together.
73  * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
74  * abuse as fragment pointers. This allows us to use a buffer management
75  * scheme very similar to that used in the ThunderLAN and Etherlink XL
76  * drivers.
77  *
78  * Autonegotiation is performed using the external PHY via the MII bus.
79  * The sample boards I have all use a Davicom PHY.
80  *
81  * Note: the author of the Linux driver for the Winbond chip alludes
82  * to some sort of flaw in the chip's design that seems to mandate some
83  * drastic workaround which significantly impairs transmit performance.
84  * I have no idea what he's on about: transmit performance with all
85  * three of my test boards seems fine.
86  */
87 
88 #include "bpfilter.h"
89 
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99 #include <sys/timeout.h>
100 
101 #include <net/if.h>
102 
103 #include <netinet/in.h>
104 #include <netinet/if_ether.h>
105 
106 #include <net/if_media.h>
107 
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111 
112 #include <uvm/uvm_extern.h>		/* for vtophys */
113 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
114 
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/pci/pcireg.h>
118 #include <dev/pci/pcivar.h>
119 #include <dev/pci/pcidevs.h>
120 
121 #define WB_USEIOSPACE
122 
123 /* #define WB_BACKGROUND_AUTONEG */
124 
125 #include <dev/pci/if_wbreg.h>
126 
127 int wb_probe(struct device *, void *, void *);
128 void wb_attach(struct device *, struct device *, void *);
129 
130 void wb_bfree(caddr_t, u_int, void *);
131 void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *);
132 int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);
133 
134 void wb_rxeof(struct wb_softc *);
135 void wb_rxeoc(struct wb_softc *);
136 void wb_txeof(struct wb_softc *);
137 void wb_txeoc(struct wb_softc *);
138 int wb_intr(void *);
139 void wb_tick(void *);
140 void wb_start(struct ifnet *);
141 int wb_ioctl(struct ifnet *, u_long, caddr_t);
142 void wb_init(void *);
143 void wb_stop(struct wb_softc *);
144 void wb_watchdog(struct ifnet *);
145 int wb_ifmedia_upd(struct ifnet *);
146 void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
147 
148 void wb_eeprom_putbyte(struct wb_softc *, int);
149 void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
150 void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
151 void wb_mii_sync(struct wb_softc *);
152 void wb_mii_send(struct wb_softc *, u_int32_t, int);
153 int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
154 int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
155 
156 void wb_setcfg(struct wb_softc *, uint64_t);
157 void wb_setmulti(struct wb_softc *);
158 void wb_reset(struct wb_softc *);
159 void wb_fixmedia(struct wb_softc *);
160 int wb_list_rx_init(struct wb_softc *);
161 int wb_list_tx_init(struct wb_softc *);
162 
163 int wb_miibus_readreg(struct device *, int, int);
164 void wb_miibus_writereg(struct device *, int, int, int);
165 void wb_miibus_statchg(struct device *);
166 
167 #define WB_SETBIT(sc, reg, x)				\
168 	CSR_WRITE_4(sc, reg,				\
169 		CSR_READ_4(sc, reg) | x)
170 
171 #define WB_CLRBIT(sc, reg, x)				\
172 	CSR_WRITE_4(sc, reg,				\
173 		CSR_READ_4(sc, reg) & ~x)
174 
175 #define SIO_SET(x)					\
176 	CSR_WRITE_4(sc, WB_SIO,				\
177 		CSR_READ_4(sc, WB_SIO) | x)
178 
179 #define SIO_CLR(x)					\
180 	CSR_WRITE_4(sc, WB_SIO,				\
181 		CSR_READ_4(sc, WB_SIO) & ~x)
182 
183 /*
184  * Send a read command and address to the EEPROM, check for ACK.
185  */
186 void
187 wb_eeprom_putbyte(struct wb_softc *sc, int addr)
188 {
189 	int			d, i;
190 
191 	d = addr | WB_EECMD_READ;
192 
193 	/*
194 	 * Feed in each bit and strobe the clock.
195 	 */
196 	for (i = 0x400; i; i >>= 1) {
197 		if (d & i) {
198 			SIO_SET(WB_SIO_EE_DATAIN);
199 		} else {
200 			SIO_CLR(WB_SIO_EE_DATAIN);
201 		}
202 		DELAY(100);
203 		SIO_SET(WB_SIO_EE_CLK);
204 		DELAY(150);
205 		SIO_CLR(WB_SIO_EE_CLK);
206 		DELAY(100);
207 	}
208 
209 	return;
210 }
211 
212 /*
213  * Read a word of data stored in the EEPROM at address 'addr.'
214  */
215 void
216 wb_eeprom_getword(struct wb_softc *sc, int addr, u_int16_t *dest)
217 {
218 	int			i;
219 	u_int16_t		word = 0;
220 
221 	/* Enter EEPROM access mode. */
222 	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
223 
224 	/*
225 	 * Send address of word we want to read.
226 	 */
227 	wb_eeprom_putbyte(sc, addr);
228 
229 	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
230 
231 	/*
232 	 * Start reading bits from EEPROM.
233 	 */
234 	for (i = 0x8000; i; i >>= 1) {
235 		SIO_SET(WB_SIO_EE_CLK);
236 		DELAY(100);
237 		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
238 			word |= i;
239 		SIO_CLR(WB_SIO_EE_CLK);
240 		DELAY(100);
241 	}
242 
243 	/* Turn off EEPROM access mode. */
244 	CSR_WRITE_4(sc, WB_SIO, 0);
245 
246 	*dest = word;
247 
248 	return;
249 }
250 
251 /*
252  * Read a sequence of words from the EEPROM.
253  */
254 void
255 wb_read_eeprom(struct wb_softc *sc, caddr_t dest, int off, int cnt, int swap)
256 {
257 	int			i;
258 	u_int16_t		word = 0, *ptr;
259 
260 	for (i = 0; i < cnt; i++) {
261 		wb_eeprom_getword(sc, off + i, &word);
262 		ptr = (u_int16_t *)(dest + (i * 2));
263 		if (swap)
264 			*ptr = ntohs(word);
265 		else
266 			*ptr = word;
267 	}
268 
269 	return;
270 }
271 
272 /*
273  * Sync the PHYs by setting data bit and strobing the clock 32 times.
274  */
275 void
276 wb_mii_sync(struct wb_softc *sc)
277 {
278 	int			i;
279 
280 	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
281 
282 	for (i = 0; i < 32; i++) {
283 		SIO_SET(WB_SIO_MII_CLK);
284 		DELAY(1);
285 		SIO_CLR(WB_SIO_MII_CLK);
286 		DELAY(1);
287 	}
288 
289 	return;
290 }
291 
292 /*
293  * Clock a series of bits through the MII.
294  */
295 void
296 wb_mii_send(struct wb_softc *sc, u_int32_t bits, int cnt)
297 {
298 	int			i;
299 
300 	SIO_CLR(WB_SIO_MII_CLK);
301 
302 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
303                 if (bits & i) {
304 			SIO_SET(WB_SIO_MII_DATAIN);
305                 } else {
306 			SIO_CLR(WB_SIO_MII_DATAIN);
307                 }
308 		DELAY(1);
309 		SIO_CLR(WB_SIO_MII_CLK);
310 		DELAY(1);
311 		SIO_SET(WB_SIO_MII_CLK);
312 	}
313 }
314 
/*
 * Read a PHY register through the MII (software bit-bang via the
 * chip's serial I/O register).  Returns 0 on success with the result
 * in frame->mii_data, or 1 if the PHY failed to ACK the read.
 */
int
wb_mii_readreg(struct wb_softc *sc, struct wb_mii_frame *frame)
{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	/* Sync the PHY's serial interface before talking to it. */
	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(WB_SIO_MII_DIR);
	/* Check for ack: the PHY pulls the data line low to ACK. */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to leave the bus in a consistent state. */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
408 
/*
 * Write to a PHY register through the MII (software bit-bang via the
 * chip's serial I/O register).  Always returns 0.
 */
int
wb_mii_writereg(struct wb_softc *sc, struct wb_mii_frame *frame)
{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	/* Sync the PHY's serial interface before talking to it. */
	wb_mii_sync(sc);

	/* Clock out the full write frame, MSB first. */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	splx(s);

	return(0);
}
455 
456 int
457 wb_miibus_readreg(struct device *dev, int phy, int reg)
458 {
459 	struct wb_softc *sc = (struct wb_softc *)dev;
460 	struct wb_mii_frame frame;
461 
462 	bzero(&frame, sizeof(frame));
463 
464 	frame.mii_phyaddr = phy;
465 	frame.mii_regaddr = reg;
466 	wb_mii_readreg(sc, &frame);
467 
468 	return(frame.mii_data);
469 }
470 
471 void
472 wb_miibus_writereg(struct device *dev, int phy, int reg, int data)
473 {
474 	struct wb_softc *sc = (struct wb_softc *)dev;
475 	struct wb_mii_frame frame;
476 
477 	bzero(&frame, sizeof(frame));
478 
479 	frame.mii_phyaddr = phy;
480 	frame.mii_regaddr = reg;
481 	frame.mii_data = data;
482 
483 	wb_mii_writereg(sc, &frame);
484 
485 	return;
486 }
487 
488 void
489 wb_miibus_statchg(struct device *dev)
490 {
491 	struct wb_softc *sc = (struct wb_softc *)dev;
492 
493 	wb_setcfg(sc, sc->sc_mii.mii_media_active);
494 }
495 
496 /*
497  * Program the 64-bit multicast hash filter.
498  */
499 void
500 wb_setmulti(struct wb_softc *sc)
501 {
502 	struct ifnet		*ifp;
503 	int			h = 0;
504 	u_int32_t		hashes[2] = { 0, 0 };
505 	struct arpcom		*ac = &sc->arpcom;
506 	struct ether_multi	*enm;
507 	struct ether_multistep	step;
508 	u_int32_t		rxfilt;
509 	int			mcnt = 0;
510 
511 	ifp = &sc->arpcom.ac_if;
512 
513 	rxfilt = CSR_READ_4(sc, WB_NETCFG);
514 
515 	if (ac->ac_multirangecnt > 0)
516 		ifp->if_flags |= IFF_ALLMULTI;
517 
518 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
519 		rxfilt |= WB_NETCFG_RX_MULTI;
520 		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
521 		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
522 		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
523 		return;
524 	}
525 
526 	/* first, zot all the existing hash bits */
527 	CSR_WRITE_4(sc, WB_MAR0, 0);
528 	CSR_WRITE_4(sc, WB_MAR1, 0);
529 
530 	/* now program new ones */
531 	ETHER_FIRST_MULTI(step, ac, enm);
532 	while (enm != NULL) {
533 		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26);
534 		if (h < 32)
535 			hashes[0] |= (1 << h);
536 		else
537 			hashes[1] |= (1 << (h - 32));
538 		mcnt++;
539 		ETHER_NEXT_MULTI(step, enm);
540 	}
541 
542 	if (mcnt)
543 		rxfilt |= WB_NETCFG_RX_MULTI;
544 	else
545 		rxfilt &= ~WB_NETCFG_RX_MULTI;
546 
547 	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
548 	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
549 	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
550 
551 	return;
552 }
553 
/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
wb_setcfg(struct wb_softc *sc, uint64_t media)
{
	int			i, restart = 0;

	/* If the chip is currently running, idle TX/RX first. */
	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		/* Poll until both engines report idle, or time out. */
		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("%s: failed to force tx and "
				"rx to idle state\n", sc->sc_dev.dv_xname);
	}

	/* 10baseT clears the 100Mbps bit; everything else sets it. */
	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	/* Match the duplex bit to the media word. */
	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	/* Restart TX/RX if we idled them above. */
	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}
595 
/*
 * Soft-reset the chip, wait for the reset to complete, then reset
 * any attached PHYs as well.
 */
void
wb_reset(struct wb_softc *sc)
{
	int i;
	struct mii_data *mii = &sc->sc_mii;

	/* Quiesce the chip before asserting reset. */
	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	/*
	 * NOTE(review): the reset bit is set twice here.  This looks
	 * like an accidentally duplicated line rather than a deliberate
	 * double strobe -- confirm against the W89C840F data sheet
	 * before removing one.
	 */
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	/* The chip clears the reset bit when it is done. */
	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Reset the PHYs too, if any were attached. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
}
627 
628 void
629 wb_fixmedia(struct wb_softc *sc)
630 {
631 	struct mii_data *mii = &sc->sc_mii;
632 	uint64_t media;
633 
634 	if (LIST_FIRST(&mii->mii_phys) == NULL)
635 		return;
636 
637 	mii_pollstat(mii);
638 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
639 		media = mii->mii_media_active & ~IFM_10_T;
640 		media |= IFM_100_TX;
641 	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
642 		media = mii->mii_media_active & ~IFM_100_TX;
643 		media |= IFM_10_T;
644 	} else
645 		return;
646 
647 	ifmedia_set(&mii->mii_media, media);
648 }
649 
/* PCI vendor/product pairs of adapters this driver attaches to. */
const struct pci_matchid wb_devices[] = {
	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
};
654 
655 /*
656  * Probe for a Winbond chip. Check the PCI vendor and device
657  * IDs against our list and return a device name if we find a match.
658  */
659 int
660 wb_probe(struct device *parent, void *match, void *aux)
661 {
662 	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
663 	    nitems(wb_devices)));
664 }
665 
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.  On failure, resources acquired
 * so far are released via the fail_* unwind labels at the bottom.
 */
void
wb_attach(struct device *parent, struct device *self, void *aux)
{
	struct wb_softc *sc = (struct wb_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	caddr_t kva;

	/* Make sure the device is powered up before touching it. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */

#ifdef WB_USEIOSPACE
	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Save the PCI cache line size (low byte of the register). */
	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate and map DMA-safe memory for the descriptor lists. */
	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc list data\n");
		goto fail_2;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map list data, size %zd\n",
		    sizeof(struct wb_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->wb_ldata = (struct wb_list_data *)kva;

	/* Fill in the ifnet and hook up our entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_watchdog = wb_watchdog;
	ifq_set_maxlen(&ifp->if_snd, WB_TX_LIST_CNT - 1);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do ifmedia setup.
	 */
	wb_stop(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = wb_miibus_readreg;
	sc->sc_mii.mii_writereg = wb_miibus_writereg;
	sc->sc_mii.mii_statchg = wb_miibus_statchg;
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	/* If no PHY was found, advertise a single "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/* Unwind resource acquisition in reverse order on failure. */
fail_5:
	bus_dmamap_destroy(pa->pa_dmat, dmamap);

fail_4:
	bus_dmamem_unmap(pa->pa_dmat, kva,
	    sizeof(struct wb_list_data));

fail_3:
	bus_dmamem_free(pa->pa_dmat, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
}
806 
807 /*
808  * Initialize the transmit descriptors.
809  */
810 int
811 wb_list_tx_init(struct wb_softc *sc)
812 {
813 	struct wb_chain_data	*cd;
814 	struct wb_list_data	*ld;
815 	int			i;
816 
817 	cd = &sc->wb_cdata;
818 	ld = sc->wb_ldata;
819 
820 	for (i = 0; i < WB_TX_LIST_CNT; i++) {
821 		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
822 		if (i == (WB_TX_LIST_CNT - 1)) {
823 			cd->wb_tx_chain[i].wb_nextdesc =
824 				&cd->wb_tx_chain[0];
825 		} else {
826 			cd->wb_tx_chain[i].wb_nextdesc =
827 				&cd->wb_tx_chain[i + 1];
828 		}
829 	}
830 
831 	cd->wb_tx_free = &cd->wb_tx_chain[0];
832 	cd->wb_tx_tail = cd->wb_tx_head = NULL;
833 
834 	return(0);
835 }
836 
837 
838 /*
839  * Initialize the RX descriptors and allocate mbufs for them. Note that
840  * we arrange the descriptors in a closed ring, so that the last descriptor
841  * points back to the first.
842  */
843 int
844 wb_list_rx_init(struct wb_softc *sc)
845 {
846 	struct wb_chain_data	*cd;
847 	struct wb_list_data	*ld;
848 	int			i;
849 
850 	cd = &sc->wb_cdata;
851 	ld = sc->wb_ldata;
852 
853 	for (i = 0; i < WB_RX_LIST_CNT; i++) {
854 		cd->wb_rx_chain[i].wb_ptr =
855 			(struct wb_desc *)&ld->wb_rx_list[i];
856 		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
857 		wb_newbuf(sc, &cd->wb_rx_chain[i]);
858 		if (i == (WB_RX_LIST_CNT - 1)) {
859 			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
860 			ld->wb_rx_list[i].wb_next =
861 					VTOPHYS(&ld->wb_rx_list[0]);
862 		} else {
863 			cd->wb_rx_chain[i].wb_nextdesc =
864 					&cd->wb_rx_chain[i + 1];
865 			ld->wb_rx_list[i].wb_next =
866 					VTOPHYS(&ld->wb_rx_list[i + 1]);
867 		}
868 	}
869 
870 	cd->wb_rx_head = &cd->wb_rx_chain[0];
871 
872 	return(0);
873 }
874 
/*
 * Initialize an RX descriptor: point it at its receive buffer and
 * rearm it for the chip.
 */
void
wb_newbuf(struct wb_softc *sc, struct wb_chain_onefrag *c)
{
	/* Buffer starts one u_int64_t in; presumably for alignment/pad
	 * (wb_rxeof skips the same offset) -- see if_wbreg.h. */
	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
	/* Status word is written last; NOTE(review): WB_RXSTAT presumably
	 * includes the OWN bit handing the descriptor to the chip. */
	c->wb_ptr->wb_status = WB_RXSTAT;
}
885 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
wb_rxeof(struct wb_softc *sc)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
        struct ifnet		*ifp;
	struct wb_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the ring until we hit a descriptor the chip still owns. */
	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
							WB_RXSTAT_OWN)) {
		struct mbuf *m;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		/*
		 * MII error, bogus length, or an incomplete frame means
		 * the chip has gone off the rails: recycle the buffer
		 * and force a full reinitialization.
		 */
		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			printf("%s: receiver babbling: possible chip "
				"bug, forcing reset\n", sc->sc_dev.dv_xname);
			wb_fixmedia(sc);
			wb_init(sc);
			break;
		}

		/* Ordinary receive error: recycle the buffer and stop. */
		if (rxstat & WB_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
	 	 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame out of the DMA buffer, then rearm it. */
		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
		    ETHER_ALIGN);
		wb_newbuf(sc, cur_rx);
		if (m == NULL) {
			ifp->if_ierrors++;
			break;
		}

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);
}
953 
/*
 * RX end-of-channel: drain any received frames, then restart the
 * receiver at the top of the descriptor ring.
 */
void
wb_rxeoc(struct wb_softc *sc)
{
	wb_rxeof(sc);

	/* Stop RX, reload the ring base address, then start it again. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	/* If the receiver is suspended, poke it to resume polling. */
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	return;
}
967 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void
wb_txeof(struct wb_softc *sc)
{
	struct wb_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Nothing queued, nothing to reap. */
	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		/*
		 * Stop at the first frame the chip still owns, or one
		 * that was never handed to the chip at all.
		 */
		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		/* Account for transmit errors and collisions. */
		if (txstat & WB_TXSTAT_TXERR) {
			ifp->if_oerrors++;
			if (txstat & WB_TXSTAT_ABORT)
				ifp->if_collisions++;
			if (txstat & WB_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

		/* Frame is done; release its mbuf chain. */
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		/* Last in-flight descriptor reaped: queue is now empty. */
		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}

	return;
}
1023 
1024 /*
1025  * TX 'end of channel' interrupt handler.
1026  */
1027 void
1028 wb_txeoc(struct wb_softc *sc)
1029 {
1030 	struct ifnet		*ifp;
1031 
1032 	ifp = &sc->arpcom.ac_if;
1033 
1034 	ifp->if_timer = 0;
1035 
1036 	if (sc->wb_cdata.wb_tx_head == NULL) {
1037 		ifq_clr_oactive(&ifp->if_snd);
1038 		sc->wb_cdata.wb_tx_tail = NULL;
1039 	} else {
1040 		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
1041 			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
1042 			ifp->if_timer = 5;
1043 			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1044 		}
1045 	}
1046 
1047 	return;
1048 }
1049 
/*
 * Interrupt handler.  Services all pending interrupt causes in a
 * loop, acknowledging each ISR read.  Returns 1 if any interrupt
 * was ours, 0 otherwise.
 */
int
wb_intr(void *arg)
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Ignore interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		/* Read and acknowledge pending interrupt causes. */
		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		/* RX out of buffers or RX error: reset and reinit. */
		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		/* TX went idle with work still queued: restart it. */
		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		/* FIFO underrun: raise the TX threshold and restart TX. */
		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR)
			wb_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	/* Kick the transmitter if frames are waiting to go out. */
	if (!ifq_empty(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}
1131 
1132 void
1133 wb_tick(void *xsc)
1134 {
1135 	struct wb_softc *sc = xsc;
1136 	int s;
1137 
1138 	s = splnet();
1139 	mii_tick(&sc->sc_mii);
1140 	splx(s);
1141 	timeout_add_sec(&sc->wb_tick_tmo, 1);
1142 }
1143 
1144 /*
1145  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1146  * pointers to the fragment pointers.
1147  */
1148 int
1149 wb_encap(struct wb_softc *sc, struct wb_chain *c, struct mbuf *m_head)
1150 {
1151 	int			frag = 0;
1152 	struct wb_desc		*f = NULL;
1153 	int			total_len;
1154 	struct mbuf		*m;
1155 
1156 	/*
1157  	 * Start packing the mbufs in this chain into
1158 	 * the fragment pointers. Stop when we run out
1159  	 * of fragments or hit the end of the mbuf chain.
1160 	 */
1161 	m = m_head;
1162 	total_len = 0;
1163 
1164 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1165 		if (m->m_len != 0) {
1166 			if (frag == WB_MAXFRAGS)
1167 				break;
1168 			total_len += m->m_len;
1169 			f = &c->wb_ptr->wb_frag[frag];
1170 			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
1171 			if (frag == 0) {
1172 				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
1173 				f->wb_status = 0;
1174 			} else
1175 				f->wb_status = WB_TXSTAT_OWN;
1176 			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
1177 			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
1178 			frag++;
1179 		}
1180 	}
1181 
1182 	/*
1183 	 * Handle special case: we used up all 16 fragments,
1184 	 * but we have more mbufs left in the chain. Copy the
1185 	 * data into an mbuf cluster. Note that we don't
1186 	 * bother clearing the values in the other fragment
1187 	 * pointers/counters; it wouldn't gain us anything,
1188 	 * and would waste cycles.
1189 	 */
1190 	if (m != NULL) {
1191 		struct mbuf		*m_new = NULL;
1192 
1193 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1194 		if (m_new == NULL)
1195 			return(1);
1196 		if (m_head->m_pkthdr.len > MHLEN) {
1197 			MCLGET(m_new, M_DONTWAIT);
1198 			if (!(m_new->m_flags & M_EXT)) {
1199 				m_freem(m_new);
1200 				return(1);
1201 			}
1202 		}
1203 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1204 					mtod(m_new, caddr_t));
1205 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1206 		m_freem(m_head);
1207 		m_head = m_new;
1208 		f = &c->wb_ptr->wb_frag[0];
1209 		f->wb_status = 0;
1210 		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
1211 		f->wb_ctl = total_len = m_new->m_len;
1212 		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
1213 		frag = 1;
1214 	}
1215 
1216 	if (total_len < WB_MIN_FRAMELEN) {
1217 		f = &c->wb_ptr->wb_frag[frag];
1218 		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
1219 		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
1220 		f->wb_ctl |= WB_TXCTL_TLINK;
1221 		f->wb_status = WB_TXSTAT_OWN;
1222 		frag++;
1223 	}
1224 
1225 	c->wb_mbuf = m_head;
1226 	c->wb_lastdesc = frag - 1;
1227 	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
1228 	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);
1229 
1230 	return(0);
1231 }
1232 
1233 /*
1234  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1235  * to the mbuf data regions directly in the transmit lists. We also save a
1236  * copy of the pointers since the transmit list fragment pointers are
1237  * physical addresses.
1238  */
1239 void
1240 wb_start(struct ifnet *ifp)
1241 {
1242 	struct wb_softc		*sc;
1243 	struct mbuf		*m_head = NULL;
1244 	struct wb_chain		*cur_tx = NULL, *start_tx;
1245 
1246 	sc = ifp->if_softc;
1247 
1248 	/*
1249 	 * Check for an available queue slot. If there are none,
1250 	 * punt.
1251 	 */
1252 	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1253 		ifq_set_oactive(&ifp->if_snd);
1254 		return;
1255 	}
1256 
1257 	start_tx = sc->wb_cdata.wb_tx_free;
1258 
1259 	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1260 		m_head = ifq_dequeue(&ifp->if_snd);
1261 		if (m_head == NULL)
1262 			break;
1263 
1264 		/* Pick a descriptor off the free list. */
1265 		cur_tx = sc->wb_cdata.wb_tx_free;
1266 		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1267 
1268 		/* Pack the data into the descriptor. */
1269 		wb_encap(sc, cur_tx, m_head);
1270 
1271 		if (cur_tx != start_tx)
1272 			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1273 
1274 #if NBPFILTER > 0
1275 		/*
1276 		 * If there's a BPF listener, bounce a copy of this frame
1277 		 * to him.
1278 		 */
1279 		if (ifp->if_bpf)
1280 			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
1281 			    BPF_DIRECTION_OUT);
1282 #endif
1283 	}
1284 
1285 	/*
1286 	 * If there are no packets queued, bail.
1287 	 */
1288 	if (cur_tx == NULL)
1289 		return;
1290 
1291 	/*
1292 	 * Place the request for the upload interrupt
1293 	 * in the last descriptor in the chain. This way, if
1294 	 * we're chaining several packets at once, we'll only
1295 	 * get an interrupt once for the whole chain rather than
1296 	 * once for each packet.
1297 	 */
1298 	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1299 	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1300 	sc->wb_cdata.wb_tx_tail = cur_tx;
1301 
1302 	if (sc->wb_cdata.wb_tx_head == NULL) {
1303 		sc->wb_cdata.wb_tx_head = start_tx;
1304 		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1305 		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1306 	} else {
1307 		/*
1308 		 * We need to distinguish between the case where
1309 		 * the own bit is clear because the chip cleared it
1310 		 * and where the own bit is clear because we haven't
1311 		 * set it yet. The magic value WB_UNSET is just some
1312 		 * ramdomly chosen number which doesn't have the own
1313 	 	 * bit set. When we actually transmit the frame, the
1314 		 * status word will have _only_ the own bit set, so
1315 		 * the txeoc handler will be able to tell if it needs
1316 		 * to initiate another transmission to flush out pending
1317 		 * frames.
1318 		 */
1319 		WB_TXOWN(start_tx) = WB_UNSENT;
1320 	}
1321 
1322 	/*
1323 	 * Set a timeout in case the chip goes out to lunch.
1324 	 */
1325 	ifp->if_timer = 5;
1326 
1327 	return;
1328 }
1329 
/*
 * Initialize the chip: reset, program the bus/DMA configuration,
 * load the station address and descriptor rings, set the RX filter,
 * and enable the receiver, transmitter and interrupts.
 * Called at splnet-raising contexts (ioctl, watchdog, error recovery).
 */
void
wb_init(void *xsc)
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	/* Start with the default TX FIFO threshold; the interrupt
	 * handler raises it on TX underruns. */
	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	/* Cache line size (in longwords) was probed at attach time. */
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.  Writing all-ones to ISR acks any
	 * events that are already pending.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/*
	 * NOTE(review): timeout_set() is re-run on every init rather
	 * than once at attach time; appears harmless since wb_stop()
	 * deleted the timeout above, but worth confirming.
	 */
	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add_sec(&sc->wb_tick_tmo, 1);

	return;
}
1445 
1446 /*
1447  * Set media options.
1448  */
1449 int
1450 wb_ifmedia_upd(struct ifnet *ifp)
1451 {
1452 	struct wb_softc *sc = ifp->if_softc;
1453 
1454 	if (ifp->if_flags & IFF_UP)
1455 		wb_init(sc);
1456 
1457 	return(0);
1458 }
1459 
1460 /*
1461  * Report current media status.
1462  */
1463 void
1464 wb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1465 {
1466 	struct wb_softc *sc = ifp->if_softc;
1467 	struct mii_data *mii = &sc->sc_mii;
1468 
1469 	mii_pollstat(mii);
1470 	ifmr->ifm_active = mii->mii_media_active;
1471 	ifmr->ifm_status = mii->mii_media_status;
1472 }
1473 
1474 int
1475 wb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1476 {
1477 	struct wb_softc		*sc = ifp->if_softc;
1478 	struct ifreq		*ifr = (struct ifreq *) data;
1479 	int			s, error = 0;
1480 
1481 	s = splnet();
1482 
1483 	switch(command) {
1484 	case SIOCSIFADDR:
1485 		ifp->if_flags |= IFF_UP;
1486 		wb_init(sc);
1487 		break;
1488 
1489 	case SIOCSIFFLAGS:
1490 		if (ifp->if_flags & IFF_UP) {
1491 			wb_init(sc);
1492 		} else {
1493 			if (ifp->if_flags & IFF_RUNNING)
1494 				wb_stop(sc);
1495 		}
1496 		error = 0;
1497 		break;
1498 
1499 	case SIOCGIFMEDIA:
1500 	case SIOCSIFMEDIA:
1501 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1502 		break;
1503 
1504 	default:
1505 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1506 	}
1507 
1508 	if (error == ENETRESET) {
1509 		if (ifp->if_flags & IFF_RUNNING)
1510 			wb_setmulti(sc);
1511 		error = 0;
1512 	}
1513 
1514 	splx(s);
1515 	return(error);
1516 }
1517 
1518 void
1519 wb_watchdog(struct ifnet *ifp)
1520 {
1521 	struct wb_softc		*sc;
1522 
1523 	sc = ifp->if_softc;
1524 
1525 	ifp->if_oerrors++;
1526 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1527 
1528 #ifdef foo
1529 	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1530 		printf("%s: no carrier - transceiver cable problem?\n",
1531 		    sc->sc_dev.dv_xname);
1532 #endif
1533 	wb_init(sc);
1534 
1535 	if (!ifq_empty(&ifp->if_snd))
1536 		wb_start(ifp);
1537 
1538 	return;
1539 }
1540 
1541 /*
1542  * Stop the adapter and free any mbufs allocated to the
1543  * RX and TX lists.
1544  */
1545 void
1546 wb_stop(struct wb_softc *sc)
1547 {
1548 	int			i;
1549 	struct ifnet		*ifp;
1550 
1551 	ifp = &sc->arpcom.ac_if;
1552 	ifp->if_timer = 0;
1553 
1554 	timeout_del(&sc->wb_tick_tmo);
1555 
1556 	ifp->if_flags &= ~IFF_RUNNING;
1557 	ifq_clr_oactive(&ifp->if_snd);
1558 
1559 	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1560 	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1561 	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1562 	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1563 
1564 	/*
1565 	 * Free data in the RX lists.
1566 	 */
1567 	bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list));
1568 
1569 	/*
1570 	 * Free the TX list buffers.
1571 	 */
1572 	for (i = 0; i < WB_TX_LIST_CNT; i++) {
1573 		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1574 			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1575 			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1576 		}
1577 	}
1578 
1579 	bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list));
1580 }
1581 
/* Autoconf glue: softc size plus the match and attach entry points. */
struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

/* Driver descriptor: "wb" device class, network interface. */
struct cfdriver wb_cd = {
	NULL, "wb", DV_IFNET
};
1589