xref: /openbsd-src/sys/dev/pci/if_wb.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: if_wb.c,v 1.67 2016/04/13 10:34:32 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
35  */
36 
37 /*
38  * Winbond fast ethernet PCI NIC driver
39  *
40  * Supports various cheap network adapters based on the Winbond W89C840F
41  * fast ethernet controller chip. This includes adapters manufactured by
42  * Winbond itself and some made by Linksys.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The Winbond W89C840F chip is a bus master; in some ways it resembles
51  * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
52  * one major difference which is that while the registers do many of
53  * the same things as a tulip adapter, the offsets are different: where
54  * tulip registers are typically spaced 8 bytes apart, the Winbond
55  * registers are spaced 4 bytes apart. The receiver filter is also
56  * programmed differently.
57  *
58  * Like the tulip, the Winbond chip uses small descriptors containing
59  * a status word, a control word and 32-bit areas that can either be used
60  * to point to two external data blocks, or to point to a single block
61  * and another descriptor in a linked list. Descriptors can be grouped
62  * together in blocks to form fixed length rings or can be chained
63  * together in linked lists. A single packet may be spread out over
64  * several descriptors if necessary.
65  *
66  * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single mbuf cluster buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
70  *
71  * For transmission, the driver creates a linked list of 'super descriptors'
72  * which each contain several individual descriptors linked together.
73  * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
74  * abuse as fragment pointers. This allows us to use a buffer management
75  * scheme very similar to that used in the ThunderLAN and Etherlink XL
76  * drivers.
77  *
78  * Autonegotiation is performed using the external PHY via the MII bus.
79  * The sample boards I have all use a Davicom PHY.
80  *
81  * Note: the author of the Linux driver for the Winbond chip alludes
82  * to some sort of flaw in the chip's design that seems to mandate some
83  * drastic workaround which significantly impairs transmit performance.
84  * I have no idea what he's on about: transmit performance with all
85  * three of my test boards seems fine.
86  */
87 
88 #include "bpfilter.h"
89 
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97 #include <sys/device.h>
98 #include <sys/queue.h>
99 #include <sys/timeout.h>
100 
101 #include <net/if.h>
102 
103 #include <netinet/in.h>
104 #include <netinet/if_ether.h>
105 
106 #include <net/if_media.h>
107 
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111 
112 #include <uvm/uvm_extern.h>		/* for vtophys */
113 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
114 
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/pci/pcireg.h>
118 #include <dev/pci/pcivar.h>
119 #include <dev/pci/pcidevs.h>
120 
121 #define WB_USEIOSPACE
122 
123 /* #define WB_BACKGROUND_AUTONEG */
124 
125 #include <dev/pci/if_wbreg.h>
126 
127 int wb_probe(struct device *, void *, void *);
128 void wb_attach(struct device *, struct device *, void *);
129 
130 void wb_bfree(caddr_t, u_int, void *);
131 void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *);
132 int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);
133 
134 void wb_rxeof(struct wb_softc *);
135 void wb_rxeoc(struct wb_softc *);
136 void wb_txeof(struct wb_softc *);
137 void wb_txeoc(struct wb_softc *);
138 int wb_intr(void *);
139 void wb_tick(void *);
140 void wb_start(struct ifnet *);
141 int wb_ioctl(struct ifnet *, u_long, caddr_t);
142 void wb_init(void *);
143 void wb_stop(struct wb_softc *);
144 void wb_watchdog(struct ifnet *);
145 int wb_ifmedia_upd(struct ifnet *);
146 void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
147 
148 void wb_eeprom_putbyte(struct wb_softc *, int);
149 void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
150 void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
151 void wb_mii_sync(struct wb_softc *);
152 void wb_mii_send(struct wb_softc *, u_int32_t, int);
153 int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
154 int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
155 
156 void wb_setcfg(struct wb_softc *, uint64_t);
157 u_int8_t wb_calchash(caddr_t);
158 void wb_setmulti(struct wb_softc *);
159 void wb_reset(struct wb_softc *);
160 void wb_fixmedia(struct wb_softc *);
161 int wb_list_rx_init(struct wb_softc *);
162 int wb_list_tx_init(struct wb_softc *);
163 
164 int wb_miibus_readreg(struct device *, int, int);
165 void wb_miibus_writereg(struct device *, int, int, int);
166 void wb_miibus_statchg(struct device *);
167 
168 #define WB_SETBIT(sc, reg, x)				\
169 	CSR_WRITE_4(sc, reg,				\
170 		CSR_READ_4(sc, reg) | x)
171 
172 #define WB_CLRBIT(sc, reg, x)				\
173 	CSR_WRITE_4(sc, reg,				\
174 		CSR_READ_4(sc, reg) & ~x)
175 
176 #define SIO_SET(x)					\
177 	CSR_WRITE_4(sc, WB_SIO,				\
178 		CSR_READ_4(sc, WB_SIO) | x)
179 
180 #define SIO_CLR(x)					\
181 	CSR_WRITE_4(sc, WB_SIO,				\
182 		CSR_READ_4(sc, WB_SIO) & ~x)
183 
184 /*
185  * Send a read command and address to the EEPROM, check for ACK.
186  */
187 void wb_eeprom_putbyte(sc, addr)
188 	struct wb_softc		*sc;
189 	int			addr;
190 {
191 	int			d, i;
192 
193 	d = addr | WB_EECMD_READ;
194 
195 	/*
196 	 * Feed in each bit and strobe the clock.
197 	 */
198 	for (i = 0x400; i; i >>= 1) {
199 		if (d & i) {
200 			SIO_SET(WB_SIO_EE_DATAIN);
201 		} else {
202 			SIO_CLR(WB_SIO_EE_DATAIN);
203 		}
204 		DELAY(100);
205 		SIO_SET(WB_SIO_EE_CLK);
206 		DELAY(150);
207 		SIO_CLR(WB_SIO_EE_CLK);
208 		DELAY(100);
209 	}
210 
211 	return;
212 }
213 
214 /*
215  * Read a word of data stored in the EEPROM at address 'addr.'
216  */
217 void wb_eeprom_getword(sc, addr, dest)
218 	struct wb_softc		*sc;
219 	int			addr;
220 	u_int16_t		*dest;
221 {
222 	int			i;
223 	u_int16_t		word = 0;
224 
225 	/* Enter EEPROM access mode. */
226 	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
227 
228 	/*
229 	 * Send address of word we want to read.
230 	 */
231 	wb_eeprom_putbyte(sc, addr);
232 
233 	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
234 
235 	/*
236 	 * Start reading bits from EEPROM.
237 	 */
238 	for (i = 0x8000; i; i >>= 1) {
239 		SIO_SET(WB_SIO_EE_CLK);
240 		DELAY(100);
241 		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
242 			word |= i;
243 		SIO_CLR(WB_SIO_EE_CLK);
244 		DELAY(100);
245 	}
246 
247 	/* Turn off EEPROM access mode. */
248 	CSR_WRITE_4(sc, WB_SIO, 0);
249 
250 	*dest = word;
251 
252 	return;
253 }
254 
255 /*
256  * Read a sequence of words from the EEPROM.
257  */
258 void wb_read_eeprom(sc, dest, off, cnt, swap)
259 	struct wb_softc		*sc;
260 	caddr_t			dest;
261 	int			off;
262 	int			cnt;
263 	int			swap;
264 {
265 	int			i;
266 	u_int16_t		word = 0, *ptr;
267 
268 	for (i = 0; i < cnt; i++) {
269 		wb_eeprom_getword(sc, off + i, &word);
270 		ptr = (u_int16_t *)(dest + (i * 2));
271 		if (swap)
272 			*ptr = ntohs(word);
273 		else
274 			*ptr = word;
275 	}
276 
277 	return;
278 }
279 
280 /*
281  * Sync the PHYs by setting data bit and strobing the clock 32 times.
282  */
283 void wb_mii_sync(sc)
284 	struct wb_softc		*sc;
285 {
286 	int			i;
287 
288 	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
289 
290 	for (i = 0; i < 32; i++) {
291 		SIO_SET(WB_SIO_MII_CLK);
292 		DELAY(1);
293 		SIO_CLR(WB_SIO_MII_CLK);
294 		DELAY(1);
295 	}
296 
297 	return;
298 }
299 
300 /*
301  * Clock a series of bits through the MII.
302  */
303 void wb_mii_send(sc, bits, cnt)
304 	struct wb_softc		*sc;
305 	u_int32_t		bits;
306 	int			cnt;
307 {
308 	int			i;
309 
310 	SIO_CLR(WB_SIO_MII_CLK);
311 
312 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
313                 if (bits & i) {
314 			SIO_SET(WB_SIO_MII_DATAIN);
315                 } else {
316 			SIO_CLR(WB_SIO_MII_DATAIN);
317                 }
318 		DELAY(1);
319 		SIO_CLR(WB_SIO_MII_CLK);
320 		DELAY(1);
321 		SIO_SET(WB_SIO_MII_CLK);
322 	}
323 }
324 
325 /*
 * Read a PHY register through the MII.
327  */
int wb_mii_readreg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(WB_SIO_MII_DIR);
	/* Check for ack: a zero bit here means the PHY acknowledged. */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to terminate the transaction. */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	splx(s);

	/* Non-zero return means the PHY did not acknowledge. */
	if (ack)
		return(1);
	return(0);
}
420 
421 /*
422  * Write to a PHY register through the MII.
423  */
int wb_mii_writereg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/* Clock out the complete write frame, including the 16 data bits. */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	splx(s);

	/* Writes are not acknowledged; always report success. */
	return(0);
}
469 
470 int
471 wb_miibus_readreg(dev, phy, reg)
472 	struct device *dev;
473 	int phy, reg;
474 {
475 	struct wb_softc *sc = (struct wb_softc *)dev;
476 	struct wb_mii_frame frame;
477 
478 	bzero(&frame, sizeof(frame));
479 
480 	frame.mii_phyaddr = phy;
481 	frame.mii_regaddr = reg;
482 	wb_mii_readreg(sc, &frame);
483 
484 	return(frame.mii_data);
485 }
486 
487 void
488 wb_miibus_writereg(dev, phy, reg, data)
489 	struct device *dev;
490 	int phy, reg, data;
491 {
492 	struct wb_softc *sc = (struct wb_softc *)dev;
493 	struct wb_mii_frame frame;
494 
495 	bzero(&frame, sizeof(frame));
496 
497 	frame.mii_phyaddr = phy;
498 	frame.mii_regaddr = reg;
499 	frame.mii_data = data;
500 
501 	wb_mii_writereg(sc, &frame);
502 
503 	return;
504 }
505 
506 void
507 wb_miibus_statchg(dev)
508 	struct device *dev;
509 {
510 	struct wb_softc *sc = (struct wb_softc *)dev;
511 
512 	wb_setcfg(sc, sc->sc_mii.mii_media_active);
513 }
514 
515 /*
516  * Program the 64-bit multicast hash filter.
517  */
518 void wb_setmulti(sc)
519 	struct wb_softc		*sc;
520 {
521 	struct ifnet		*ifp;
522 	int			h = 0;
523 	u_int32_t		hashes[2] = { 0, 0 };
524 	struct arpcom		*ac = &sc->arpcom;
525 	struct ether_multi	*enm;
526 	struct ether_multistep	step;
527 	u_int32_t		rxfilt;
528 	int			mcnt = 0;
529 
530 	ifp = &sc->arpcom.ac_if;
531 
532 	rxfilt = CSR_READ_4(sc, WB_NETCFG);
533 
534 	if (ac->ac_multirangecnt > 0)
535 		ifp->if_flags |= IFF_ALLMULTI;
536 
537 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
538 		rxfilt |= WB_NETCFG_RX_MULTI;
539 		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
540 		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
541 		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
542 		return;
543 	}
544 
545 	/* first, zot all the existing hash bits */
546 	CSR_WRITE_4(sc, WB_MAR0, 0);
547 	CSR_WRITE_4(sc, WB_MAR1, 0);
548 
549 	/* now program new ones */
550 	ETHER_FIRST_MULTI(step, ac, enm);
551 	while (enm != NULL) {
552 		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26);
553 		if (h < 32)
554 			hashes[0] |= (1 << h);
555 		else
556 			hashes[1] |= (1 << (h - 32));
557 		mcnt++;
558 		ETHER_NEXT_MULTI(step, enm);
559 	}
560 
561 	if (mcnt)
562 		rxfilt |= WB_NETCFG_RX_MULTI;
563 	else
564 		rxfilt &= ~WB_NETCFG_RX_MULTI;
565 
566 	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
567 	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
568 	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
569 
570 	return;
571 }
572 
573 /*
574  * The Winbond manual states that in order to fiddle with the
575  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
576  * first have to put the transmit and/or receive logic in the idle state.
577  */
void
wb_setcfg(sc, media)
	struct wb_softc *sc;
	uint64_t media;
{
	int			i, restart = 0;

	/* If either DMA engine is running, stop both and wait for idle. */
	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		/* Poll the ISR until both state machines report idle. */
		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("%s: failed to force tx and "
				"rx to idle state\n", sc->sc_dev.dv_xname);
	}

	/* 10baseT clears the 100Mbps bit; any other subtype sets it. */
	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	/* Restart the DMA engines if we stopped them above. */
	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}
616 
/*
 * Soft-reset the chip and any attached PHYs.
 */
void
wb_reset(sc)
	struct wb_softc *sc;
{
	int i;
	struct mii_data *mii = &sc->sc_mii;

	/* Quiesce the chip: clear config and descriptor base registers. */
	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	/*
	 * NOTE(review): the reset bit is set twice here.  Looks redundant,
	 * but it may be a deliberate double-write for flaky silicon --
	 * confirm against the W89C840F datasheet before removing.
	 */
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	/* Wait for the reset bit to self-clear. */
	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Reset any attached PHYs as well. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
}
649 
650 void
651 wb_fixmedia(sc)
652 	struct wb_softc *sc;
653 {
654 	struct mii_data *mii = &sc->sc_mii;
655 	uint64_t media;
656 
657 	if (LIST_FIRST(&mii->mii_phys) == NULL)
658 		return;
659 
660 	mii_pollstat(mii);
661 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
662 		media = mii->mii_media_active & ~IFM_10_T;
663 		media |= IFM_100_TX;
664 	} if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
665 		media = mii->mii_media_active & ~IFM_100_TX;
666 		media |= IFM_10_T;
667 	} else
668 		return;
669 
670 	ifmedia_set(&mii->mii_media, media);
671 }
672 
/* PCI vendor/product pairs this driver supports; consumed by wb_probe(). */
const struct pci_matchid wb_devices[] = {
	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
};
677 
678 /*
679  * Probe for a Winbond chip. Check the PCI vendor and device
680  * IDs against our list and return a device name if we find a match.
681  */
682 int
683 wb_probe(parent, match, aux)
684 	struct device *parent;
685 	void *match, *aux;
686 {
687 	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
688 	    nitems(wb_devices)));
689 }
690 
691 /*
692  * Attach the interface. Allocate softc structures, do ifmedia
693  * setup and ethernet/BPF attach.
694  */
void
wb_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wb_softc *sc = (struct wb_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	caddr_t kva;

	/* Make sure the device is powered up before touching registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */

#ifdef WB_USEIOSPACE
	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Save the PCI cacheline size for later burst-length setup. */
	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/*
	 * Allocate and map a single contiguous, zeroed chunk for all the
	 * TX/RX descriptor lists and RX buffers.
	 */
	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc list data\n");
		goto fail_2;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		/* NOTE(review): %zd for a size_t; should strictly be %zu. */
		printf(": can't map list data, size %zd\n",
		    sizeof(struct wb_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->wb_ldata = (struct wb_list_data *)kva;

	/* Hook up the ifnet entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_watchdog = wb_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do ifmedia setup.
	 */
	wb_stop(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = wb_miibus_readreg;
	sc->sc_mii.mii_writereg = wb_miibus_writereg;
	sc->sc_mii.mii_statchg = wb_miibus_statchg;
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	/* No PHY found: fall back to a manual 'none' media entry. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/* Unwind partially-acquired resources in reverse order. */
fail_5:
	bus_dmamap_destroy(pa->pa_dmat, dmamap);

fail_4:
	bus_dmamem_unmap(pa->pa_dmat, kva,
	    sizeof(struct wb_list_data));

fail_3:
	bus_dmamem_free(pa->pa_dmat, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
}
833 
834 /*
835  * Initialize the transmit descriptors.
836  */
837 int wb_list_tx_init(sc)
838 	struct wb_softc		*sc;
839 {
840 	struct wb_chain_data	*cd;
841 	struct wb_list_data	*ld;
842 	int			i;
843 
844 	cd = &sc->wb_cdata;
845 	ld = sc->wb_ldata;
846 
847 	for (i = 0; i < WB_TX_LIST_CNT; i++) {
848 		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
849 		if (i == (WB_TX_LIST_CNT - 1)) {
850 			cd->wb_tx_chain[i].wb_nextdesc =
851 				&cd->wb_tx_chain[0];
852 		} else {
853 			cd->wb_tx_chain[i].wb_nextdesc =
854 				&cd->wb_tx_chain[i + 1];
855 		}
856 	}
857 
858 	cd->wb_tx_free = &cd->wb_tx_chain[0];
859 	cd->wb_tx_tail = cd->wb_tx_head = NULL;
860 
861 	return(0);
862 }
863 
864 
865 /*
866  * Initialize the RX descriptors and allocate mbufs for them. Note that
867  * we arrange the descriptors in a closed ring, so that the last descriptor
868  * points back to the first.
869  */
870 int wb_list_rx_init(sc)
871 	struct wb_softc		*sc;
872 {
873 	struct wb_chain_data	*cd;
874 	struct wb_list_data	*ld;
875 	int			i;
876 
877 	cd = &sc->wb_cdata;
878 	ld = sc->wb_ldata;
879 
880 	for (i = 0; i < WB_RX_LIST_CNT; i++) {
881 		cd->wb_rx_chain[i].wb_ptr =
882 			(struct wb_desc *)&ld->wb_rx_list[i];
883 		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
884 		wb_newbuf(sc, &cd->wb_rx_chain[i]);
885 		if (i == (WB_RX_LIST_CNT - 1)) {
886 			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
887 			ld->wb_rx_list[i].wb_next =
888 					VTOPHYS(&ld->wb_rx_list[0]);
889 		} else {
890 			cd->wb_rx_chain[i].wb_nextdesc =
891 					&cd->wb_rx_chain[i + 1];
892 			ld->wb_rx_list[i].wb_next =
893 					VTOPHYS(&ld->wb_rx_list[i + 1]);
894 		}
895 	}
896 
897 	cd->wb_rx_head = &cd->wb_rx_chain[0];
898 
899 	return(0);
900 }
901 
902 /*
903  * Initialize an RX descriptor and attach an MBUF cluster.
904  */
void
wb_newbuf(sc, c)
	struct wb_softc *sc;
	struct wb_chain_onefrag *c;
{
	/*
	 * Buffer address skips a leading u_int64_t of the RX buffer --
	 * the same offset wb_rxeof() reads from (presumably alignment
	 * padding; confirm against if_wbreg.h).
	 */
	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
	/* Status written last so the chip sees a fully-formed descriptor. */
	c->wb_ptr->wb_status = WB_RXSTAT;
}
914 
915 /*
916  * A frame has been uploaded: pass the resulting mbuf chain up to
917  * the higher level protocols.
918  */
void wb_rxeof(sc)
	struct wb_softc		*sc;
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
	struct wb_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the ring until we hit a descriptor still owned by the chip. */
	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
							WB_RXSTAT_OWN)) {
		struct mbuf *m;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		/*
		 * Sanity-check the descriptor: MII error, impossible
		 * length, or an incomplete frame means the chip has gone
		 * off the rails -- recycle the buffer and reinitialize.
		 */
		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			printf("%s: receiver babbling: possible chip "
				"bug, forcing reset\n", sc->sc_dev.dv_xname);
			wb_fixmedia(sc);
			wb_init(sc);
			break;
		}

		if (rxstat & WB_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame out of the RX buffer into a fresh mbuf. */
		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
		    ETHER_ALIGN);
		wb_newbuf(sc, cur_rx);
		if (m == NULL) {
			ifp->if_ierrors++;
			break;
		}

		ml_enqueue(&ml, m);
	}

	/* Hand the whole batch to the network stack at once. */
	if_input(ifp, &ml);
}
982 
/*
 * RX 'end of channel': drain anything still in the ring, then restart
 * the receive DMA engine at the head of the descriptor list.
 */
void wb_rxeoc(sc)
	struct wb_softc		*sc;
{
	wb_rxeof(sc);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	/* If the receiver is suspended, poke the RX poll-demand register. */
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	return;
}
996 
997 /*
998  * A frame was downloaded to the chip. It's safe for us to clean up
999  * the list buffers.
1000  */
void wb_txeof(sc)
	struct wb_softc		*sc;
{
	struct wb_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Nothing in flight. */
	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		/* Stop at the first descriptor still owned or not started. */
		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		/* Tally errors and collisions from the status word. */
		if (txstat & WB_TXSTAT_TXERR) {
			ifp->if_oerrors++;
			if (txstat & WB_TXSTAT_ABORT)
				ifp->if_collisions++;
			if (txstat & WB_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		/* Last outstanding descriptor: the queue is now empty. */
		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}

	return;
}
1053 
1054 /*
1055  * TX 'end of channel' interrupt handler.
1056  */
void wb_txeoc(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		/* Queue fully drained; allow new work to be queued. */
		ifq_clr_oactive(&ifp->if_snd);
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		/*
		 * A queued-but-unsent frame remains: transfer ownership
		 * to the chip and kick the TX poll-demand register.
		 */
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}
1079 
/*
 * Interrupt handler: dispatch on each pending cause until the ISR
 * shows nothing of interest.  Returns 1 if any interrupt was ours.
 */
int wb_intr(arg)
	void			*arg;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Ignore interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		/* Read and acknowledge all pending causes at once. */
		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		/* RX overrun/error: full reset is the only recovery. */
		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		/* TX went idle with work pending: restart the engine. */
		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR)
			wb_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	/* Kick the transmitter if packets queued up while we were busy. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}
1161 
1162 void
1163 wb_tick(xsc)
1164 	void *xsc;
1165 {
1166 	struct wb_softc *sc = xsc;
1167 	int s;
1168 
1169 	s = splnet();
1170 	mii_tick(&sc->sc_mii);
1171 	splx(s);
1172 	timeout_add_sec(&sc->wb_tick_tmo, 1);
1173 }
1174 
1175 /*
1176  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1177  * pointers to the fragment pointers.
1178  */
1179 int wb_encap(sc, c, m_head)
1180 	struct wb_softc		*sc;
1181 	struct wb_chain		*c;
1182 	struct mbuf		*m_head;
1183 {
1184 	int			frag = 0;
1185 	struct wb_desc		*f = NULL;
1186 	int			total_len;
1187 	struct mbuf		*m;
1188 
1189 	/*
1190  	 * Start packing the mbufs in this chain into
1191 	 * the fragment pointers. Stop when we run out
1192  	 * of fragments or hit the end of the mbuf chain.
1193 	 */
1194 	m = m_head;
1195 	total_len = 0;
1196 
1197 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1198 		if (m->m_len != 0) {
1199 			if (frag == WB_MAXFRAGS)
1200 				break;
1201 			total_len += m->m_len;
1202 			f = &c->wb_ptr->wb_frag[frag];
1203 			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
1204 			if (frag == 0) {
1205 				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
1206 				f->wb_status = 0;
1207 			} else
1208 				f->wb_status = WB_TXSTAT_OWN;
1209 			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
1210 			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
1211 			frag++;
1212 		}
1213 	}
1214 
1215 	/*
1216 	 * Handle special case: we used up all 16 fragments,
1217 	 * but we have more mbufs left in the chain. Copy the
1218 	 * data into an mbuf cluster. Note that we don't
1219 	 * bother clearing the values in the other fragment
1220 	 * pointers/counters; it wouldn't gain us anything,
1221 	 * and would waste cycles.
1222 	 */
1223 	if (m != NULL) {
1224 		struct mbuf		*m_new = NULL;
1225 
1226 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1227 		if (m_new == NULL)
1228 			return(1);
1229 		if (m_head->m_pkthdr.len > MHLEN) {
1230 			MCLGET(m_new, M_DONTWAIT);
1231 			if (!(m_new->m_flags & M_EXT)) {
1232 				m_freem(m_new);
1233 				return(1);
1234 			}
1235 		}
1236 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1237 					mtod(m_new, caddr_t));
1238 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1239 		m_freem(m_head);
1240 		m_head = m_new;
1241 		f = &c->wb_ptr->wb_frag[0];
1242 		f->wb_status = 0;
1243 		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
1244 		f->wb_ctl = total_len = m_new->m_len;
1245 		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
1246 		frag = 1;
1247 	}
1248 
1249 	if (total_len < WB_MIN_FRAMELEN) {
1250 		f = &c->wb_ptr->wb_frag[frag];
1251 		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
1252 		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
1253 		f->wb_ctl |= WB_TXCTL_TLINK;
1254 		f->wb_status = WB_TXSTAT_OWN;
1255 		frag++;
1256 	}
1257 
1258 	c->wb_mbuf = m_head;
1259 	c->wb_lastdesc = frag - 1;
1260 	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
1261 	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);
1262 
1263 	return(0);
1264 }
1265 
1266 /*
1267  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1268  * to the mbuf data regions directly in the transmit lists. We also save a
1269  * copy of the pointers since the transmit list fragment pointers are
1270  * physical addresses.
1271  */
1272 
1273 void wb_start(ifp)
1274 	struct ifnet		*ifp;
1275 {
1276 	struct wb_softc		*sc;
1277 	struct mbuf		*m_head = NULL;
1278 	struct wb_chain		*cur_tx = NULL, *start_tx;
1279 
1280 	sc = ifp->if_softc;
1281 
1282 	/*
1283 	 * Check for an available queue slot. If there are none,
1284 	 * punt.
1285 	 */
1286 	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1287 		ifq_set_oactive(&ifp->if_snd);
1288 		return;
1289 	}
1290 
1291 	start_tx = sc->wb_cdata.wb_tx_free;
1292 
1293 	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1294 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1295 		if (m_head == NULL)
1296 			break;
1297 
1298 		/* Pick a descriptor off the free list. */
1299 		cur_tx = sc->wb_cdata.wb_tx_free;
1300 		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1301 
1302 		/* Pack the data into the descriptor. */
1303 		wb_encap(sc, cur_tx, m_head);
1304 
1305 		if (cur_tx != start_tx)
1306 			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1307 
1308 #if NBPFILTER > 0
1309 		/*
1310 		 * If there's a BPF listener, bounce a copy of this frame
1311 		 * to him.
1312 		 */
1313 		if (ifp->if_bpf)
1314 			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
1315 			    BPF_DIRECTION_OUT);
1316 #endif
1317 	}
1318 
1319 	/*
1320 	 * If there are no packets queued, bail.
1321 	 */
1322 	if (cur_tx == NULL)
1323 		return;
1324 
1325 	/*
1326 	 * Place the request for the upload interrupt
1327 	 * in the last descriptor in the chain. This way, if
1328 	 * we're chaining several packets at once, we'll only
1329 	 * get an interrupt once for the whole chain rather than
1330 	 * once for each packet.
1331 	 */
1332 	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1333 	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1334 	sc->wb_cdata.wb_tx_tail = cur_tx;
1335 
1336 	if (sc->wb_cdata.wb_tx_head == NULL) {
1337 		sc->wb_cdata.wb_tx_head = start_tx;
1338 		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1339 		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1340 	} else {
1341 		/*
1342 		 * We need to distinguish between the case where
1343 		 * the own bit is clear because the chip cleared it
1344 		 * and where the own bit is clear because we haven't
1345 		 * set it yet. The magic value WB_UNSET is just some
1346 		 * ramdomly chosen number which doesn't have the own
1347 	 	 * bit set. When we actually transmit the frame, the
1348 		 * status word will have _only_ the own bit set, so
1349 		 * the txeoc handler will be able to tell if it needs
1350 		 * to initiate another transmission to flush out pending
1351 		 * frames.
1352 		 */
1353 		WB_TXOWN(start_tx) = WB_UNSENT;
1354 	}
1355 
1356 	/*
1357 	 * Set a timeout in case the chip goes out to lunch.
1358 	 */
1359 	ifp->if_timer = 5;
1360 
1361 	return;
1362 }
1363 
/*
 * Initialization: stop and reset the chip, program bus/DMA parameters,
 * load the station address and RX/TX descriptor lists, set the receive
 * filter, then enable the receiver and transmitter and start the
 * one-second tick timer.
 */
void wb_init(xsc)
	void			*xsc;
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	/* Start from the lowest TX threshold; the interrupt handler
	 * bumps it upward on TX underruns. */
	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	/* Match DMA cache alignment to the PCI cache line size probed
	 * at attach time (sc->wb_cachesize, in longwords). */
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.  The receiver must be off
	 * while its list pointer is rewritten.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.  Writing all-ones to ISR acks any stale
	 * interrupt conditions before unmasking.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/* (Re)arm the periodic tick; wb_stop() above deleted any
	 * pending one, so re-initializing the timeout here is safe. */
	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add_sec(&sc->wb_tick_tmo, 1);

	return;
}
1479 
1480 /*
1481  * Set media options.
1482  */
1483 int
1484 wb_ifmedia_upd(ifp)
1485 	struct ifnet *ifp;
1486 {
1487 	struct wb_softc *sc = ifp->if_softc;
1488 
1489 	if (ifp->if_flags & IFF_UP)
1490 		wb_init(sc);
1491 
1492 	return(0);
1493 }
1494 
1495 /*
1496  * Report current media status.
1497  */
1498 void
1499 wb_ifmedia_sts(ifp, ifmr)
1500 	struct ifnet		*ifp;
1501 	struct ifmediareq	*ifmr;
1502 {
1503 	struct wb_softc *sc = ifp->if_softc;
1504 	struct mii_data *mii = &sc->sc_mii;
1505 
1506 	mii_pollstat(mii);
1507 	ifmr->ifm_active = mii->mii_media_active;
1508 	ifmr->ifm_status = mii->mii_media_status;
1509 }
1510 
1511 int wb_ioctl(ifp, command, data)
1512 	struct ifnet		*ifp;
1513 	u_long			command;
1514 	caddr_t			data;
1515 {
1516 	struct wb_softc		*sc = ifp->if_softc;
1517 	struct ifreq		*ifr = (struct ifreq *) data;
1518 	int			s, error = 0;
1519 
1520 	s = splnet();
1521 
1522 	switch(command) {
1523 	case SIOCSIFADDR:
1524 		ifp->if_flags |= IFF_UP;
1525 		wb_init(sc);
1526 		break;
1527 
1528 	case SIOCSIFFLAGS:
1529 		if (ifp->if_flags & IFF_UP) {
1530 			wb_init(sc);
1531 		} else {
1532 			if (ifp->if_flags & IFF_RUNNING)
1533 				wb_stop(sc);
1534 		}
1535 		error = 0;
1536 		break;
1537 
1538 	case SIOCGIFMEDIA:
1539 	case SIOCSIFMEDIA:
1540 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1541 		break;
1542 
1543 	default:
1544 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1545 	}
1546 
1547 	if (error == ENETRESET) {
1548 		if (ifp->if_flags & IFF_RUNNING)
1549 			wb_setmulti(sc);
1550 		error = 0;
1551 	}
1552 
1553 	splx(s);
1554 	return(error);
1555 }
1556 
1557 void wb_watchdog(ifp)
1558 	struct ifnet		*ifp;
1559 {
1560 	struct wb_softc		*sc;
1561 
1562 	sc = ifp->if_softc;
1563 
1564 	ifp->if_oerrors++;
1565 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1566 
1567 #ifdef foo
1568 	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1569 		printf("%s: no carrier - transceiver cable problem?\n",
1570 		    sc->sc_dev.dv_xname);
1571 #endif
1572 	wb_init(sc);
1573 
1574 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1575 		wb_start(ifp);
1576 
1577 	return;
1578 }
1579 
1580 /*
1581  * Stop the adapter and free any mbufs allocated to the
1582  * RX and TX lists.
1583  */
1584 void wb_stop(sc)
1585 	struct wb_softc		*sc;
1586 {
1587 	int			i;
1588 	struct ifnet		*ifp;
1589 
1590 	ifp = &sc->arpcom.ac_if;
1591 	ifp->if_timer = 0;
1592 
1593 	timeout_del(&sc->wb_tick_tmo);
1594 
1595 	ifp->if_flags &= ~IFF_RUNNING;
1596 	ifq_clr_oactive(&ifp->if_snd);
1597 
1598 	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1599 	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1600 	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1601 	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1602 
1603 	/*
1604 	 * Free data in the RX lists.
1605 	 */
1606 	bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list));
1607 
1608 	/*
1609 	 * Free the TX list buffers.
1610 	 */
1611 	for (i = 0; i < WB_TX_LIST_CNT; i++) {
1612 		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1613 			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1614 			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1615 		}
1616 	}
1617 
1618 	bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list));
1619 }
1620 
/* Autoconf attachment glue: softc size plus probe/attach entry points. */
struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

/* Driver class glue: device name "wb", classed as a network interface. */
struct cfdriver wb_cd = {
	NULL, "wb", DV_IFNET
};
1628