/*	$OpenBSD: if_wb.c,v 1.72 2020/07/10 13:26:38 patrick Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
 */

/*
 * Winbond fast ethernet PCI NIC driver
 *
 * Supports various cheap network adapters based on the Winbond W89C840F
 * fast ethernet controller chip. This includes adapters manufactured by
 * Winbond itself and some made by Linksys.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Winbond W89C840F chip is a bus master; in some ways it resembles
 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
 * one major difference which is that while the registers do many of
 * the same things as a tulip adapter, the offsets are different: where
 * tulip registers are typically spaced 8 bytes apart, the Winbond
 * registers are spaced 4 bytes apart. The receiver filter is also
 * programmed differently.
 *
 * Like the tulip, the Winbond chip uses small descriptors containing
 * a status word, a control word and 32-bit areas that can either be used
 * to point to two external data blocks, or to point to a single block
 * and another descriptor in a linked list. Descriptors can be grouped
 * together in blocks to form fixed length rings or can be chained
 * together in linked lists. A single packet may be spread out over
 * several descriptors if necessary.
 *
 * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single receive buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
 *
 * For transmission, the driver creates a linked list of 'super descriptors'
 * which each contain several individual descriptors linked together.
 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
 * abuse as fragment pointers. This allows us to use a buffer management
 * scheme very similar to that used in the ThunderLAN and Etherlink XL
 * drivers.
 *
 * Autonegotiation is performed using the external PHY via the MII bus.
 * The sample boards I have all use a Davicom PHY.
 *
 * Note: the author of the Linux driver for the Winbond chip alludes
 * to some sort of flaw in the chip's design that seems to mandate some
 * drastic workaround which significantly impairs transmit performance.
 * I have no idea what he's on about: transmit performance with all
 * three of my test boards seems fine.
 */
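
/*
 * As a rough sketch of the descriptor format described above (the
 * authoritative definition is struct wb_desc in if_wbreg.h; only the
 * field names actually used in this file are shown, and the ordering
 * in the header may differ), each descriptor boils down to four
 * 32-bit words:
 *
 *	wb_status	ownership, completion and error/length bits
 *	wb_ctl		control flags plus the buffer length
 *	wb_next		bus address of the next descriptor (chained mode)
 *	wb_data		bus address of the data buffer
 */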

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>		/* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define WB_USEIOSPACE

/* #define WB_BACKGROUND_AUTONEG */

#include <dev/pci/if_wbreg.h>

int wb_probe(struct device *, void *, void *);
void wb_attach(struct device *, struct device *, void *);

void wb_bfree(caddr_t, u_int, void *);
void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *);
int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);

void wb_rxeof(struct wb_softc *);
void wb_rxeoc(struct wb_softc *);
void wb_txeof(struct wb_softc *);
void wb_txeoc(struct wb_softc *);
int wb_intr(void *);
void wb_tick(void *);
void wb_start(struct ifnet *);
int wb_ioctl(struct ifnet *, u_long, caddr_t);
void wb_init(void *);
void wb_stop(struct wb_softc *);
void wb_watchdog(struct ifnet *);
int wb_ifmedia_upd(struct ifnet *);
void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void wb_eeprom_putbyte(struct wb_softc *, int);
void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
void wb_mii_sync(struct wb_softc *);
void wb_mii_send(struct wb_softc *, u_int32_t, int);
int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);

void wb_setcfg(struct wb_softc *, uint64_t);
void wb_setmulti(struct wb_softc *);
void wb_reset(struct wb_softc *);
void wb_fixmedia(struct wb_softc *);
int wb_list_rx_init(struct wb_softc *);
int wb_list_tx_init(struct wb_softc *);

int wb_miibus_readreg(struct device *, int, int);
void wb_miibus_writereg(struct device *, int, int, int);
void wb_miibus_statchg(struct device *);

#define WB_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define WB_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) & ~x)
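
/*
 * Note that unlike WB_SETBIT/WB_CLRBIT above, which take the softc
 * explicitly, SIO_SET and SIO_CLR expand to register accesses on a
 * local variable named 'sc', so they can only be used in functions
 * that have a 'struct wb_softc *sc' in scope.  For example:
 *
 *	SIO_SET(WB_SIO_EE_CS);
 * expands to
 *	CSR_WRITE_4(sc, WB_SIO, CSR_READ_4(sc, WB_SIO) | WB_SIO_EE_CS);
 */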

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void wb_eeprom_putbyte(sc, addr)
	struct wb_softc		*sc;
	int			addr;
{
	int			d, i;

	d = addr | WB_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void wb_eeprom_getword(sc, addr, dest)
	struct wb_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	int			i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void wb_read_eeprom(sc, dest, off, cnt, swap)
	struct wb_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		wb_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}
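
/*
 * A typical use, as in wb_attach() below: the station address occupies
 * the first three 16-bit words of the EEPROM, so the whole MAC address
 * can be pulled in one call, with no byte swapping:
 *
 *	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
 */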

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void wb_mii_sync(sc)
	struct wb_softc		*sc;
{
	int			i;

	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void wb_mii_send(sc, bits, cnt)
	struct wb_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	SIO_CLR(WB_SIO_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(WB_SIO_MII_DATAIN);
		} else {
			SIO_CLR(WB_SIO_MII_DATAIN);
		}
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_SET(WB_SIO_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int wb_mii_readreg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(WB_SIO_MII_DIR);
	/* Check for ack */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
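
/*
 * For reference, the frame bit-banged out above follows the standard
 * IEEE 802.3 clause 22 MII management format: a 2-bit start delimiter,
 * a 2-bit opcode (read or write), a 5-bit PHY address, a 5-bit register
 * address, a turnaround period and then 16 data bits, most significant
 * bit first.  wb_mii_send() clocks each field out starting from bit
 * (cnt - 1), which is why the callers pass these exact field widths.
 */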

/*
 * Write to a PHY register through the MII.
 */
int wb_mii_writereg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	splx(s);

	return(0);
}

int
wb_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	wb_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
wb_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	wb_mii_writereg(sc, &frame);

	return;
}

void
wb_miibus_statchg(dev)
	struct device *dev;
{
	struct wb_softc *sc = (struct wb_softc *)dev;

	wb_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
void wb_setmulti(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, WB_NETCFG);

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= WB_NETCFG_RX_MULTI;
		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, WB_MAR0, 0);
	CSR_WRITE_4(sc, WB_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= WB_NETCFG_RX_MULTI;
	else
		rxfilt &= ~WB_NETCFG_RX_MULTI;

	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);

	return;
}

/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
wb_setcfg(sc, media)
	struct wb_softc *sc;
	uint64_t media;
{
	int			i, restart = 0;

	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("%s: failed to force tx and "
				"rx to idle state\n", sc->sc_dev.dv_xname);
	}

	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}

void
wb_reset(sc)
	struct wb_softc *sc;
{
	int i;
	struct mii_data *mii = &sc->sc_mii;

	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
}

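/*
 * Media workaround used by the receiver error recovery paths in
 * wb_rxeof() and wb_intr(): when the chip wedges on receive, the
 * currently active media is flipped between 10baseT and 100baseTX
 * (leaving the other media bits alone) to give the PHY a kick before
 * the interface is reinitialized with wb_init().
 */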
void
wb_fixmedia(sc)
	struct wb_softc *sc;
{
	struct mii_data *mii = &sc->sc_mii;
	uint64_t media;

	if (LIST_FIRST(&mii->mii_phys) == NULL)
		return;

	mii_pollstat(mii);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		media = mii->mii_media_active & ~IFM_10_T;
		media |= IFM_100_TX;
	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		media = mii->mii_media_active & ~IFM_100_TX;
		media |= IFM_10_T;
	} else
		return;

	ifmedia_set(&mii->mii_media, media);
}

const struct pci_matchid wb_devices[] = {
	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
};

/*
 * Probe for a Winbond chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
wb_probe(parent, match, aux)
	struct device *parent;
	void *match, *aux;
{
	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
	    nitems(wb_devices)));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
wb_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wb_softc *sc = (struct wb_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	caddr_t kva;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */

#ifdef WB_USEIOSPACE
	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc list data\n");
		goto fail_2;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map list data, size %zu\n",
		    sizeof(struct wb_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->wb_ldata = (struct wb_list_data *)kva;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_watchdog = wb_watchdog;
	ifq_set_maxlen(&ifp->if_snd, WB_TX_LIST_CNT - 1);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do ifmedia setup.
	 */
	wb_stop(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = wb_miibus_readreg;
	sc->sc_mii.mii_writereg = wb_miibus_writereg;
	sc->sc_mii.mii_statchg = wb_miibus_statchg;
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

fail_5:
	bus_dmamap_destroy(pa->pa_dmat, dmamap);

fail_4:
	bus_dmamem_unmap(pa->pa_dmat, kva,
	    sizeof(struct wb_list_data));

fail_3:
	bus_dmamem_free(pa->pa_dmat, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int wb_list_tx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
		if (i == (WB_TX_LIST_CNT - 1)) {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[0];
		} else {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[i + 1];
		}
	}

	cd->wb_tx_free = &cd->wb_tx_chain[0];
	cd->wb_tx_tail = cd->wb_tx_head = NULL;

	return(0);
}


/*
 * Initialize the RX descriptors and attach their receive buffers. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int wb_list_rx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		cd->wb_rx_chain[i].wb_ptr =
			(struct wb_desc *)&ld->wb_rx_list[i];
		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
		wb_newbuf(sc, &cd->wb_rx_chain[i]);
		if (i == (WB_RX_LIST_CNT - 1)) {
			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
			ld->wb_rx_list[i].wb_next =
					VTOPHYS(&ld->wb_rx_list[0]);
		} else {
			cd->wb_rx_chain[i].wb_nextdesc =
					&cd->wb_rx_chain[i + 1];
			ld->wb_rx_list[i].wb_next =
					VTOPHYS(&ld->wb_rx_list[i + 1]);
		}
	}

	cd->wb_rx_head = &cd->wb_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and point it at its receive buffer.
 */
void
wb_newbuf(sc, c)
	struct wb_softc *sc;
	struct wb_chain_onefrag *c;
{
	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
	c->wb_ptr->wb_status = WB_RXSTAT;
}
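
/*
 * Note that the data pointer programmed above is offset by
 * sizeof(u_int64_t) into the receive buffer; wb_rxeof() below applies
 * the same offset when it copies the frame out with m_devget(), so the
 * two must stay in sync.
 */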

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void wb_rxeof(sc)
	struct wb_softc		*sc;
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
	struct wb_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
							WB_RXSTAT_OWN)) {
		struct mbuf *m;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			printf("%s: receiver babbling: possible chip "
				"bug, forcing reset\n", sc->sc_dev.dv_xname);
			wb_fixmedia(sc);
			wb_init(sc);
			break;
		}

		if (rxstat & WB_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
		    ETHER_ALIGN);
		wb_newbuf(sc, cur_rx);
		if (m == NULL) {
			ifp->if_ierrors++;
			break;
		}

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);
}

void wb_rxeoc(sc)
	struct wb_softc		*sc;
{
	wb_rxeof(sc);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void wb_txeof(sc)
	struct wb_softc		*sc;
{
	struct wb_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		if (txstat & WB_TXSTAT_TXERR) {
			ifp->if_oerrors++;
			if (txstat & WB_TXSTAT_ABORT)
				ifp->if_collisions++;
			if (txstat & WB_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
void wb_txeoc(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}

int wb_intr(arg)
	void			*arg;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR)
			wb_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	if (!ifq_empty(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}

void
wb_tick(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);
	timeout_add_sec(&sc->wb_tick_tmo, 1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int wb_encap(sc, c, m_head)
	struct wb_softc		*sc;
	struct wb_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct wb_desc		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}
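
/*
 * The result is one 'super descriptor' per packet, laid out roughly as
 * follows (illustrative only; the number of fragments depends on the
 * mbuf chain, and the WB_TXCTL/WB_TXNEXT macros from if_wbreg.h hide
 * exactly which fragment they touch):
 *
 *	wb_frag[0]		FIRSTFRAG, first mbuf's data
 *	wb_frag[1..n-1]		one fragment per remaining non-empty mbuf,
 *				each linked to the next via wb_next
 *	wb_frag[n]		possibly a pad fragment pointing at wb_pad
 *				to reach WB_MIN_FRAMELEN; the last used
 *				fragment gets LASTFRAG and its wb_next
 *				points at the next chain's wb_frag[0]
 */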

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void wb_start(ifp)
	struct ifnet		*ifp;
{
	struct wb_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct wb_chain		*cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
		ifq_set_oactive(&ifp->if_snd);
		return;
	}

	start_tx = sc->wb_cdata.wb_tx_free;

	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->wb_cdata.wb_tx_free;
		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;

		/* Pack the data into the descriptor. */
		wb_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
	sc->wb_cdata.wb_tx_tail = cur_tx;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		sc->wb_cdata.wb_tx_head = start_tx;
		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
	} else {
		/*
		 * We need to distinguish between the case where
		 * the own bit is clear because the chip cleared it
		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSENT is just some
		 * randomly chosen number which doesn't have the own
		 * bit set. When we actually transmit the frame, the
		 * status word will have _only_ the own bit set, so
		 * the txeoc handler will be able to tell if it needs
		 * to initiate another transmission to flush out pending
		 * frames.
		 */
		WB_TXOWN(start_tx) = WB_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

void wb_init(xsc)
	void			*xsc;
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add_sec(&sc->wb_tick_tmo, 1);

	return;
}

/*
 * Set media options.
 */
int
wb_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		wb_init(sc);

	return(0);
}

/*
 * Report current media status.
 */
void
wb_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct wb_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int wb_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct wb_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		wb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			wb_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				wb_stop(sc);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			wb_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

void wb_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct wb_softc		*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

#ifdef foo
	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
#endif
	wb_init(sc);

	if (!ifq_empty(&ifp->if_snd))
		wb_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void wb_stop(sc)
	struct wb_softc		*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->wb_tick_tmo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
		}
	}

	bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list));
}

struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

struct cfdriver wb_cd = {
	NULL, "wb", DV_IFNET
};
1626