1 /*	$OpenBSD: xl.c,v 1.28 2001/08/12 20:12:12 mickey Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c900-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
57  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
58  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
59  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
60  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
61  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
62  * 3Com 3C575TX		10/100Mbps LAN CardBus PC Card
63  * 3Com 3CCFE575BT	10/100Mbps LAN CardBus PC Card
64  * 3Com 3CCFE575CT	10/100Mbps LAN CardBus PC Card
65  * 3Com 3C3FE575CT	10/100Mbps LAN CardBus Type III PC Card
66  * 3Com 3CCFEM656	10/100Mbps LAN+56k Modem CardBus PC Card
67  * 3Com 3CCFEM656B	10/100Mbps LAN+56k Modem CardBus PC Card
68  * 3Com 3CCFEM656C	10/100Mbps LAN+56k Global Modem CardBus PC Card
69  * 3Com 3C3FEM656C	10/100Mbps LAN+56k Global Modem CardBus Type III PC Card
70  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
71  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
72  * Dell on-board 3c920	10/100Mbps/RJ-45
73  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
74  * Dell Latitude laptop docking station embedded 3c905-TX
75  *
76  * Written by Bill Paul <wpaul@ctr.columbia.edu>
77  * Electrical Engineering Department
78  * Columbia University, New York City
79  */
80 
81 /*
82  * The 3c90x series chips use a bus-master DMA interface for transferring
83  * packets to and from the controller chip. Some of the "vortex" cards
84  * (3c59x) also supported a bus master mode; however, for those chips
85  * you could only DMA packets to/from a contiguous memory buffer. For
86  * transmission this would mean copying the contents of the queued mbuf
87  * chain into an mbuf cluster and then DMAing the cluster. This extra
88  * copy would sort of defeat the purpose of the bus master support for
89  * any packet that doesn't fit into a single mbuf.
90  *
91  * By contrast, the 3c90x cards support a fragment-based bus master
92  * mode where mbuf chains can be encapsulated using TX descriptors.
93  * This is similar to other PCI chips such as the Texas Instruments
94  * ThunderLAN and the Intel 82557/82558.
95  *
96  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
97  * bus master chips because they maintain the old PIO interface for
98  * backwards compatibility, but starting with the 3c905B and the
99  * "cyclone" chips, the compatibility interface has been dropped.
100  * Since using bus master DMA is a big win, we use this driver to
101  * support the PCI "boomerang" chips, even though they also work with
102  * the "vortex" driver, in order to obtain better performance.
103  *
104  * This driver core lives in sys/dev/ic since it is bus-independent;
105  * the PCI and CardBus attachment glue is kept separate.
106  */
107 
108 #include "bpfilter.h"
109 #include "vlan.h"
110 
111 #include <sys/param.h>
112 #include <sys/systm.h>
113 #include <sys/mbuf.h>
114 #include <sys/protosw.h>
115 #include <sys/socket.h>
116 #include <sys/ioctl.h>
117 #include <sys/errno.h>
118 #include <sys/malloc.h>
119 #include <sys/kernel.h>
120 #include <sys/proc.h>   /* only for declaration of wakeup() used by vm.h */
121 #include <sys/device.h>
122 
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_types.h>
126 #include <net/if_media.h>
127 
128 #ifdef INET
129 #include <netinet/in.h>
130 #include <netinet/in_systm.h>
131 #include <netinet/in_var.h>
132 #include <netinet/ip.h>
133 #include <netinet/if_ether.h>
134 #endif
135 
136 #include <dev/mii/mii.h>
137 #include <dev/mii/miivar.h>
138 
139 #include <machine/bus.h>
140 
141 #if NBPFILTER > 0
142 #include <net/bpf.h>
143 #endif
144 
145 #include <vm/vm.h>              /* for vtophys */
146 
147 #include <dev/ic/xlreg.h>
148 
149 int xl_newbuf		__P((struct xl_softc *, struct xl_chain_onefrag *));
150 void xl_stats_update	__P((void *));
151 int xl_encap		__P((struct xl_softc *, struct xl_chain *,
152     struct mbuf * ));
153 int xl_encap_90xB	__P((struct xl_softc *, struct xl_chain *,
154     struct mbuf * ));
155 void xl_rxeof		__P((struct xl_softc *));
156 int xl_rx_resync	__P((struct xl_softc *));
157 void xl_txeof		__P((struct xl_softc *));
158 void xl_txeof_90xB	__P((struct xl_softc *));
159 void xl_txeoc		__P((struct xl_softc *));
160 int xl_intr		__P((void *));
161 void xl_start		__P((struct ifnet *));
162 void xl_start_90xB	__P((struct ifnet *));
163 int xl_ioctl		__P((struct ifnet *, u_long, caddr_t));
164 void xl_init		__P((void *));
165 void xl_stop		__P((struct xl_softc *));
166 void xl_freetxrx	__P((struct xl_softc *));
167 void xl_watchdog	__P((struct ifnet *));
168 void xl_shutdown	__P((void *));
169 int xl_ifmedia_upd	__P((struct ifnet *));
170 void xl_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
171 
172 int xl_eeprom_wait	__P((struct xl_softc *));
173 int xl_read_eeprom	__P((struct xl_softc *, caddr_t, int, int, int));
174 void xl_mii_sync	__P((struct xl_softc *));
175 void xl_mii_send	__P((struct xl_softc *, u_int32_t, int));
176 int xl_mii_readreg	__P((struct xl_softc *, struct xl_mii_frame *));
177 int xl_mii_writereg	__P((struct xl_softc *, struct xl_mii_frame *));
178 
179 void xl_setcfg		__P((struct xl_softc *));
180 void xl_setmode		__P((struct xl_softc *, int));
181 u_int8_t xl_calchash	__P((caddr_t));
182 void xl_setmulti	__P((struct xl_softc *));
183 void xl_setmulti_hash	__P((struct xl_softc *));
184 void xl_reset		__P((struct xl_softc *, int));
185 int xl_list_rx_init	__P((struct xl_softc *));
186 int xl_list_tx_init	__P((struct xl_softc *));
187 int xl_list_tx_init_90xB	__P((struct xl_softc *));
188 void xl_wait		__P((struct xl_softc *));
189 void xl_mediacheck	__P((struct xl_softc *));
190 void xl_choose_xcvr	__P((struct xl_softc *, int));
191 #ifdef notdef
192 void xl_testpacket	__P((struct xl_softc *));
193 #endif
194 
195 int xl_miibus_readreg	__P((struct device *, int, int));
196 void xl_miibus_writereg	__P((struct device *, int, int, int));
197 void xl_miibus_statchg	__P((struct device *));
198 
199 /*
200  * Murphy's law says that it's possible the chip can wedge and
201  * the 'command in progress' bit may never clear. Hence, we wait
202  * only a finite amount of time to avoid getting caught in an
203  * infinite loop. Normally this delay routine would be a macro,
204  * but it isn't called during normal operation so we can afford
205  * to make it a function.
206  */
207 void xl_wait(sc)
208 	struct xl_softc		*sc;
209 {
210 	register int		i;
211 
212 	for (i = 0; i < XL_TIMEOUT; i++) {
213 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
214 			break;
215 	}
216 
217 #ifdef DIAGNOSTIC
218 	if (i == XL_TIMEOUT)
219 		printf("xl%d: command never completed!\n", sc->xl_unit);
220 #endif
221 
222 	return;
223 }
224 
225 /*
226  * MII access routines are provided for adapters with external
227  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
228  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
229  * Note: if you don't perform the MDIO operations just right,
230  * it's possible to end up with code that works correctly with
231  * some chips/CPUs/processor speeds/bus speeds/etc but not
232  * with others.
233  */
234 #define MII_SET(x)					\
235 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
236 		CSR_READ_2(sc, XL_W4_PHY_MGMT) | x)
237 
238 #define MII_CLR(x)					\
239 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
240 		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~x)
241 
242 /*
243  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
244  */
245 void xl_mii_sync(sc)
246 	struct xl_softc		*sc;
247 {
248 	register int		i;
249 
250 	XL_SEL_WIN(4);
251 	MII_SET(XL_MII_DIR|XL_MII_DATA);
252 
253 	for (i = 0; i < 32; i++) {
254 		MII_SET(XL_MII_CLK);
255 		DELAY(1);
256 		MII_CLR(XL_MII_CLK);
257 		DELAY(1);
258 	}
259 
260 	return;
261 }
262 
263 /*
264  * Clock a series of bits through the MII.
265  */
266 void xl_mii_send(sc, bits, cnt)
267 	struct xl_softc		*sc;
268 	u_int32_t		bits;
269 	int			cnt;
270 {
271 	int			i;
272 
273 	XL_SEL_WIN(4);
274 	MII_CLR(XL_MII_CLK);
275 
276 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
277                 if (bits & i) {
278 			MII_SET(XL_MII_DATA);
279                 } else {
280 			MII_CLR(XL_MII_DATA);
281                 }
282 		DELAY(1);
283 		MII_CLR(XL_MII_CLK);
284 		DELAY(1);
285 		MII_SET(XL_MII_CLK);
286 	}
287 }
288 
289 /*
290  * Read a PHY register through the MII.
291  */
292 int xl_mii_readreg(sc, frame)
293 	struct xl_softc		*sc;
294 	struct xl_mii_frame	*frame;
295 
296 {
297 	int			i, ack, s;
298 
299 	s = splimp();
300 
301 	/*
302 	 * Set up frame for RX.
303 	 */
304 	frame->mii_stdelim = XL_MII_STARTDELIM;
305 	frame->mii_opcode = XL_MII_READOP;
306 	frame->mii_turnaround = 0;
307 	frame->mii_data = 0;
308 
309 	/*
310 	 * Select register window 4.
311 	 */
312 
313 	XL_SEL_WIN(4);
314 
315 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
316 	/*
317  	 * Turn on data xmit.
318 	 */
319 	MII_SET(XL_MII_DIR);
320 
321 	xl_mii_sync(sc);
322 
323 	/*
324 	 * Send command/address info.
325 	 */
326 	xl_mii_send(sc, frame->mii_stdelim, 2);
327 	xl_mii_send(sc, frame->mii_opcode, 2);
328 	xl_mii_send(sc, frame->mii_phyaddr, 5);
329 	xl_mii_send(sc, frame->mii_regaddr, 5);
330 
331 	/* Idle bit */
332 	MII_CLR((XL_MII_CLK|XL_MII_DATA));
333 	DELAY(1);
334 	MII_SET(XL_MII_CLK);
335 	DELAY(1);
336 
337 	/* Turn off xmit. */
338 	MII_CLR(XL_MII_DIR);
339 
340 	/* Check for ack */
341 	MII_CLR(XL_MII_CLK);
342 	DELAY(1);
343 	MII_SET(XL_MII_CLK);
344 	DELAY(1);
345 	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
346 
347 	/*
348 	 * Now try reading data bits. If the ack failed, we still
349 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
350 	 */
351 	if (ack) {
352 		for(i = 0; i < 16; i++) {
353 			MII_CLR(XL_MII_CLK);
354 			DELAY(1);
355 			MII_SET(XL_MII_CLK);
356 			DELAY(1);
357 		}
358 		goto fail;
359 	}
360 
361 	for (i = 0x8000; i; i >>= 1) {
362 		MII_CLR(XL_MII_CLK);
363 		DELAY(1);
364 		if (!ack) {
365 			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
366 				frame->mii_data |= i;
367 			DELAY(1);
368 		}
369 		MII_SET(XL_MII_CLK);
370 		DELAY(1);
371 	}
372 
373 fail:
374 
375 	MII_CLR(XL_MII_CLK);
376 	DELAY(1);
377 	MII_SET(XL_MII_CLK);
378 	DELAY(1);
379 
380 	splx(s);
381 
382 	if (ack)
383 		return(1);
384 	return(0);
385 }
386 
387 /*
388  * Write to a PHY register through the MII.
389  */
390 int xl_mii_writereg(sc, frame)
391 	struct xl_softc		*sc;
392 	struct xl_mii_frame	*frame;
393 
394 {
395 	int			s;
396 
397 	s = splimp();
398 	/*
399 	 * Set up frame for TX.
400 	 */
401 
402 	frame->mii_stdelim = XL_MII_STARTDELIM;
403 	frame->mii_opcode = XL_MII_WRITEOP;
404 	frame->mii_turnaround = XL_MII_TURNAROUND;
405 
406 	/*
407 	 * Select register window 4.
408 	 */
409 	XL_SEL_WIN(4);
410 
411 	/*
412  	 * Turn on data output.
413 	 */
414 	MII_SET(XL_MII_DIR);
415 
416 	xl_mii_sync(sc);
417 
418 	xl_mii_send(sc, frame->mii_stdelim, 2);
419 	xl_mii_send(sc, frame->mii_opcode, 2);
420 	xl_mii_send(sc, frame->mii_phyaddr, 5);
421 	xl_mii_send(sc, frame->mii_regaddr, 5);
422 	xl_mii_send(sc, frame->mii_turnaround, 2);
423 	xl_mii_send(sc, frame->mii_data, 16);
424 
425 	/* Idle bit. */
426 	MII_SET(XL_MII_CLK);
427 	DELAY(1);
428 	MII_CLR(XL_MII_CLK);
429 	DELAY(1);
430 
431 	/*
432 	 * Turn off xmit.
433 	 */
434 	MII_CLR(XL_MII_DIR);
435 
436 	splx(s);
437 
438 	return(0);
439 }
440 
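/*
 * MII bus glue: these are the hooks the generic mii(4) layer uses to
 * read and write PHY registers and to react to link/duplex changes.
 */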
441 int
442 xl_miibus_readreg(self, phy, reg)
443 	struct device *self;
444 	int phy, reg;
445 {
446 	struct xl_softc *sc = (struct xl_softc *)self;
447 	struct xl_mii_frame	frame;
448 
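	/*
	 * Unless the adapter is flagged as having a well-behaved PHY,
	 * only let accesses to the (apparently fixed) internal PHY
	 * address 24 through to the hardware.
	 */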
449 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
450 		return (0);
451 
452 	bzero((char *)&frame, sizeof(frame));
453 
454 	frame.mii_phyaddr = phy;
455 	frame.mii_regaddr = reg;
456 	xl_mii_readreg(sc, &frame);
457 
458 	return(frame.mii_data);
459 }
460 
461 void
462 xl_miibus_writereg(self, phy, reg, data)
463 	struct device *self;
464 	int phy, reg, data;
465 {
466 	struct xl_softc *sc = (struct xl_softc *)self;
467 	struct xl_mii_frame	frame;
468 
469 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
470 		return;
471 
472 	bzero((char *)&frame, sizeof(frame));
473 
474 	frame.mii_phyaddr = phy;
475 	frame.mii_regaddr = reg;
476 	frame.mii_data = data;
477 
478 	xl_mii_writereg(sc, &frame);
479 }
480 
481 void
482 xl_miibus_statchg(self)
483 	struct device *self;
484 {
485 	struct xl_softc *sc = (struct xl_softc *)self;
486 
487 	xl_setcfg(sc);
488 
489 	XL_SEL_WIN(3);
490 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
491 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
492 	else
493 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
494 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
495 }
496 
497 /*
498  * The EEPROM is slow: give it time to come ready after issuing
499  * it a command.
500  */
501 int xl_eeprom_wait(sc)
502 	struct xl_softc		*sc;
503 {
504 	int			i;
505 
506 	for (i = 0; i < 100; i++) {
507 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
508 			DELAY(162);
509 		else
510 			break;
511 	}
512 
513 	if (i == 100) {
514 		printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
515 		return(1);
516 	}
517 
518 	return(0);
519 }
520 
521 /*
522  * Read a sequence of words from the EEPROM. Note that ethernet address
523  * data is stored in the EEPROM in network byte order.
524  */
525 int xl_read_eeprom(sc, dest, off, cnt, swap)
526 	struct xl_softc		*sc;
527 	caddr_t			dest;
528 	int			off;
529 	int			cnt;
530 	int			swap;
531 {
532 	int			err = 0, i;
533 	u_int16_t		word = 0, *ptr;
534 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
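	/*
	 * The EEPROM command word has only a 6-bit address field; this
	 * macro appears to fold any higher offset bits in above the
	 * opcode so that larger parts can still be addressed.
	 */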
535 	/* WARNING! DANGER!
536 	 * It's easy to accidentally overwrite the rom content!
537 	 * Note: the 3c575 uses 8bit EEPROM offsets.
538 	 */
539 	XL_SEL_WIN(0);
540 
541 	if (xl_eeprom_wait(sc))
542 		return(1);
543 
544 	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
545 		off += 0x30;
546 
547 	for (i = 0; i < cnt; i++) {
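		/*
		 * Parts with 8-bit EEPROM offsets (e.g. the 3c575) seem to
		 * take the read opcode in bits 8-9 with a full 8-bit address
		 * below it, hence the (2<<8) form instead of XL_EE_READ.
		 */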
548 		if (sc->xl_flags & XL_FLAG_8BITROM)
549 			CSR_WRITE_2(sc, XL_W0_EE_CMD, (2<<8) | (off + i ));
550 		else
551 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
552 			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
553 		err = xl_eeprom_wait(sc);
554 		if (err)
555 			break;
556 		word = CSR_READ_2(sc, XL_W0_EE_DATA);
557 		ptr = (u_int16_t *)(dest + (i * 2));
558 		if (swap)
559 			*ptr = ntohs(word);
560 		else
561 			*ptr = word;
562 	}
563 
564 	return(err ? 1 : 0);
565 }
566 
567 /*
568  * This routine is taken from the 3Com Etherlink XL manual,
569  * page 10-7. It calculates a CRC of the supplied multicast
570  * group address and returns the lower 8 bits, which are used
571  * as the multicast filter position.
572  * Note: the 3c905B currently only supports a 64-bit hash table,
573  * which means we really only need 6 bits, but the manual indicates
574  * that future chip revisions will have a 256-bit hash table,
575  * hence the routine is set up to calculate 8 bits of position
576  * info in case we need it some day.
577  * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
578  * 256 bit hash table. This means we have to use all 8 bits regardless.
579  * On older cards, the upper 2 bits will be ignored. Grrrr....
580  */
581 u_int8_t xl_calchash(addr)
582 	caddr_t			addr;
583 {
584 	u_int32_t		crc, carry;
585 	int			i, j;
586 	u_int8_t		c;
587 
588 	/* Compute CRC for the address value. */
589 	crc = 0xFFFFFFFF; /* initial value */
590 
591 	for (i = 0; i < 6; i++) {
592 		c = *(addr + i);
593 		for (j = 0; j < 8; j++) {
594 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
595 			crc <<= 1;
596 			c >>= 1;
597 			if (carry)
598 				crc = (crc ^ 0x04c11db6) | carry;
599 		}
600 	}
601 
602 	/* return the filter bit position */
603 	return(crc & 0x000000FF);
604 }
605 
606 /*
607  * NICs older than the 3c905B have only one multicast option, which
608  * is to enable reception of all multicast frames.
609  */
610 void xl_setmulti(sc)
611 	struct xl_softc		*sc;
612 {
613 	struct ifnet		*ifp;
614 	struct arpcom *ac = &sc->arpcom;
615 	struct ether_multi *enm;
616 	struct ether_multistep step;
617 	u_int8_t		rxfilt;
618 	int			mcnt = 0;
619 
620 	ifp = &sc->arpcom.ac_if;
621 
622 	XL_SEL_WIN(5);
623 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
624 
625 	if (ifp->if_flags & IFF_ALLMULTI) {
626 		rxfilt |= XL_RXFILTER_ALLMULTI;
627 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
628 		return;
629 	}
630 
631 	ETHER_FIRST_MULTI(step, ac, enm);
632 	while (enm != NULL) {
633 		mcnt++;
634 		ETHER_NEXT_MULTI(step, enm);
635 	}
636 
637 	if (mcnt)
638 		rxfilt |= XL_RXFILTER_ALLMULTI;
639 	else
640 		rxfilt &= ~XL_RXFILTER_ALLMULTI;
641 
642 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
643 
644 	return;
645 }
646 
647 /*
648  * 3c905B adapters have a hash filter that we can program.
649  */
650 void xl_setmulti_hash(sc)
651 	struct xl_softc		*sc;
652 {
653 	struct ifnet		*ifp;
654 	int			h = 0, i;
655 	struct arpcom *ac = &sc->arpcom;
656 	struct ether_multi *enm;
657 	struct ether_multistep step;
658 	u_int8_t		rxfilt;
659 	int			mcnt = 0;
660 
661 	ifp = &sc->arpcom.ac_if;
662 
663 	XL_SEL_WIN(5);
664 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
665 
666 	if (ifp->if_flags & IFF_ALLMULTI) {
667 allmulti:
668 		rxfilt |= XL_RXFILTER_ALLMULTI;
669 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
670 		return;
671 	} else
672 		rxfilt &= ~XL_RXFILTER_ALLMULTI;
673 
674 
675 	/* first, zot all the existing hash bits */
676 	for (i = 0; i < XL_HASHFILT_SIZE; i++)
677 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
678 
679 	/* now program new ones */
680 	ETHER_FIRST_MULTI(step, ac, enm);
681 	while (enm != NULL) {
682 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
683 			ifp->if_flags |= IFF_ALLMULTI;
684 			goto allmulti;
685 		}
686 		h = xl_calchash(enm->enm_addrlo);
687 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
688 		mcnt++;
689 		ETHER_NEXT_MULTI(step, enm);
690 	}
691 
692 	if (mcnt)
693 		rxfilt |= XL_RXFILTER_MULTIHASH;
694 	else
695 		rxfilt &= ~XL_RXFILTER_MULTIHASH;
696 
697 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
698 
699 	return;
700 }
701 
702 #ifdef notdef
703 void xl_testpacket(sc)
704 	struct xl_softc		*sc;
705 {
706 	struct mbuf		*m;
707 	struct ifnet		*ifp;
708 	int			error;
709 
710 	ifp = &sc->arpcom.ac_if;
711 
712 	MGETHDR(m, M_DONTWAIT, MT_DATA);
713 
714 	if (m == NULL)
715 		return;
716 
717 	bcopy(&sc->arpcom.ac_enaddr,
718 		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
719 	bcopy(&sc->arpcom.ac_enaddr,
720 		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
721 	mtod(m, struct ether_header *)->ether_type = htons(3);
722 	mtod(m, unsigned char *)[14] = 0;
723 	mtod(m, unsigned char *)[15] = 0;
724 	mtod(m, unsigned char *)[16] = 0xE3;
725 	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
726 	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
727 	xl_start(ifp);
728 
729 	return;
730 }
731 #endif
732 
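/*
 * Point the connector field of the internal config register at the MII
 * (or at the internal autoneg logic for 10/100baseTX parts) and make
 * sure the coax transceiver is shut down.
 */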
733 void xl_setcfg(sc)
734 	struct xl_softc *sc;
735 {
736 	u_int32_t icfg;
737 
738 	XL_SEL_WIN(3);
739 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
740 	icfg &= ~XL_ICFG_CONNECTOR_MASK;
741 	if (sc->xl_media & XL_MEDIAOPT_MII ||
742 	    sc->xl_media & XL_MEDIAOPT_BT4)
743 		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
744 	if (sc->xl_media & XL_MEDIAOPT_BTX)
745 		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
746 
747 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
748 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
749 }
750 
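/*
 * Manually select a (non-MII) media type: pick the matching transceiver,
 * program the media status bits it needs and set the duplex accordingly.
 */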
751 void xl_setmode(sc, media)
752 	struct xl_softc *sc;
753 	int media;
754 {
755 	u_int32_t icfg;
756 	u_int16_t mediastat;
757 
758 	printf("xl%d: selecting ", sc->xl_unit);
759 
760 	XL_SEL_WIN(4);
761 	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
762 	XL_SEL_WIN(3);
763 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
764 
765 	if (sc->xl_media & XL_MEDIAOPT_BT) {
766 		if (IFM_SUBTYPE(media) == IFM_10_T) {
767 			printf("10baseT transceiver, ");
768 			sc->xl_xcvr = XL_XCVR_10BT;
769 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
770 			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
771 			mediastat |= XL_MEDIASTAT_LINKBEAT|
772 					XL_MEDIASTAT_JABGUARD;
773 			mediastat &= ~XL_MEDIASTAT_SQEENB;
774 		}
775 	}
776 
777 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
778 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
779 			printf("100baseFX port, ");
780 			sc->xl_xcvr = XL_XCVR_100BFX;
781 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
782 			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
783 			mediastat |= XL_MEDIASTAT_LINKBEAT;
784 			mediastat &= ~XL_MEDIASTAT_SQEENB;
785 		}
786 	}
787 
788 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
789 		if (IFM_SUBTYPE(media) == IFM_10_5) {
790 			printf("AUI port, ");
791 			sc->xl_xcvr = XL_XCVR_AUI;
792 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
793 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
794 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
795 					XL_MEDIASTAT_JABGUARD);
796 			mediastat |= ~XL_MEDIASTAT_SQEENB;
797 		}
798 		if (IFM_SUBTYPE(media) == IFM_10_FL) {
799 			printf("10baseFL transceiver, ");
800 			sc->xl_xcvr = XL_XCVR_AUI;
801 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
802 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
803 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
804 					XL_MEDIASTAT_JABGUARD);
805 			mediastat |= ~XL_MEDIASTAT_SQEENB;
806 		}
807 	}
808 
809 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
810 		if (IFM_SUBTYPE(media) == IFM_10_2) {
811 			printf("BNC port, ");
812 			sc->xl_xcvr = XL_XCVR_COAX;
813 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
814 			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
815 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
816 					XL_MEDIASTAT_JABGUARD|
817 					XL_MEDIASTAT_SQEENB);
818 		}
819 	}
820 
821 	if ((media & IFM_GMASK) == IFM_FDX ||
822 			IFM_SUBTYPE(media) == IFM_100_FX) {
823 		printf("full duplex\n");
824 		XL_SEL_WIN(3);
825 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
826 	} else {
827 		printf("half duplex\n");
828 		XL_SEL_WIN(3);
829 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
830 			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
831 	}
832 
833 	if (IFM_SUBTYPE(media) == IFM_10_2)
834 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
835 	else
836 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
837 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
838 	XL_SEL_WIN(4);
839 	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
840 	DELAY(800);
841 	XL_SEL_WIN(7);
842 }
843 
844 void xl_reset(sc, hard)
845 	struct xl_softc		*sc;
	int			hard;
846 {
847 	register int		i;
848 
849 	XL_SEL_WIN(0);
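	/*
	 * The low bits of the reset command appear to select which parts
	 * of the chip get reset: "weird reset" adapters want the full
	 * 0xFF mask, while a normal soft reset only uses 0x0010.
	 */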
850 	if (hard || (sc->xl_flags & XL_FLAG_WEIRDRESET)) {
851 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
852 		    ((sc->xl_flags & XL_FLAG_WEIRDRESET)?0xFF:0));
853 	}
854 	else
855 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | 0x0010);
856 	xl_wait(sc);
857 
858 	for (i = 0; i < XL_TIMEOUT; i++) {
859 		DELAY(10);
860 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
861 			break;
862 	}
863 
864 	DELAY(100000);
865 
866 	/* Reset TX and RX. */
867 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
868 	xl_wait(sc);
869 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
870 	xl_wait(sc);
871 
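	/*
	 * "Weird reset" adapters apparently also need some extra,
	 * otherwise undocumented bits (0x4010) set in the reset options
	 * register once the reset has completed.
	 */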
872 	if (sc->xl_flags & XL_FLAG_WEIRDRESET) {
873 		XL_SEL_WIN(2);
874 		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
875 		    XL_W2_RESET_OPTIONS) | 0x4010);
876 	}
877 
878 	/* Wait a little while for the chip to get its brains in order. */
879 	DELAY(100000);
880         return;
881 }
882 
883 /*
884  * This routine is a kludge to work around possible hardware faults
885  * or manufacturing defects that can cause the media options register
886  * (or reset options register, as it's called for the first generation
887  * 3c90x adapters) to return an incorrect result. I have encountered
888  * one Dell Latitude laptop docking station with an integrated 3c905-TX
889  * which doesn't have any of the 'mediaopt' bits set. This screws up
890  * the attach routine pretty badly because it doesn't know what media
891  * to look for. If we find ourselves in this predicament, this routine
892  * will try to guess the media options values and warn the user of a
893  * possible manufacturing defect with his adapter/system/whatever.
894  */
895 void xl_mediacheck(sc)
896 	struct xl_softc		*sc;
897 {
898 	/*
899 	 * If some of the media options bits are set, assume they are
900 	 * correct. If not, try to figure it out down below.
901 	 * XXX I should check for 10baseFL, but I don't have an adapter
902 	 * to test with.
903 	 */
904 	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
905 		/*
906 	 	 * Check the XCVR value. If it's not in the normal range
907 	 	 * of values, we need to fake it up here.
908 	 	 */
909 		if (sc->xl_xcvr <= XL_XCVR_AUTO)
910 			return;
911 		else {
912 			printf("xl%d: bogus xcvr value "
913 			"in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
914 			printf("xl%d: choosing new default based "
915 				"on card type\n", sc->xl_unit);
916 		}
917 	} else {
918 		if (sc->xl_type == XL_TYPE_905B &&
919 		    sc->xl_media & XL_MEDIAOPT_10FL)
920 			return;
921 		printf("xl%d: WARNING: no media options bits set in "
922 			"the media options register!!\n", sc->xl_unit);
923 		printf("xl%d: this could be a manufacturing defect in "
924 			"your adapter or system\n", sc->xl_unit);
925 		printf("xl%d: attempting to guess media type; you "
926 			"should probably consult your vendor\n", sc->xl_unit);
927 	}
928 
929 	xl_choose_xcvr(sc, 1);
930 }
931 
932 void xl_choose_xcvr(sc, verbose)
933 	struct xl_softc *sc;
934 	int verbose;
935 {
936 	u_int16_t devid;
937 
938 	/*
939 	 * Read the device ID from the EEPROM.
940 	 * This is what's loaded into the PCI device ID register, so it has
941 	 * to be correct otherwise we wouldn't have gotten this far.
942 	 */
943 	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
944 
945 	switch(devid) {
946 	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
947 	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
948 		sc->xl_media = XL_MEDIAOPT_BT;
949 		sc->xl_xcvr = XL_XCVR_10BT;
950 		if (verbose)
951 			printf("xl%d: guessing 10BaseT transceiver\n",
952 			    sc->xl_unit);
953 		break;
954 	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
955 	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
956 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
957 		sc->xl_xcvr = XL_XCVR_10BT;
958 		if (verbose)
959 			printf("xl%d: guessing COMBO (AUI/BNC/TP)\n",
960 			    sc->xl_unit);
961 		break;
962 	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
963 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
964 		sc->xl_xcvr = XL_XCVR_10BT;
965 		if (verbose)
966 			printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
967 		break;
968 	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
969 		sc->xl_media = XL_MEDIAOPT_10FL;
970 		sc->xl_xcvr = XL_XCVR_AUI;
971 		if (verbose)
972 			printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
973 		break;
974 	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
975 	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
976 	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
977 	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
978 		sc->xl_media = XL_MEDIAOPT_MII;
979 		sc->xl_xcvr = XL_XCVR_MII;
980 		if (verbose)
981 			printf("xl%d: guessing MII\n", sc->xl_unit);
982 		break;
983 	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
984 	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
985 		sc->xl_media = XL_MEDIAOPT_BT4;
986 		sc->xl_xcvr = XL_XCVR_MII;
987 		if (verbose)
988 			printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
989 		break;
990 	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
991 	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
992 	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
993 	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
994 	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
995 	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
996 		sc->xl_media = XL_MEDIAOPT_BTX;
997 		sc->xl_xcvr = XL_XCVR_AUTO;
998 		if (verbose)
999 			printf("xl%d: guessing 10/100 internal\n",
1000 			    sc->xl_unit);
1001 		break;
1002 	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
1003 		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1004 		sc->xl_xcvr = XL_XCVR_AUTO;
1005 		if (verbose)
1006 			printf("xl%d: guessing 10/100 plus BNC/AUI\n",
1007 			    sc->xl_unit);
1008 		break;
1009 	case TC_DEVICEID_3C575_CARDBUS:
1010 	case TC_DEVICEID_3CCFE575BT_CARDBUS:
1011 	case TC_DEVICEID_3CCFE575CT_CARDBUS:
1012 	case TC_DEVICEID_3CCFEM656_CARDBUS:
1013 	case TC_DEVICEID_3CCFEM656B_CARDBUS:
1014 	case TC_DEVICEID_3CCFEM656C_CARDBUS:
1015 		sc->xl_media = XL_MEDIAOPT_MII;
1016 		sc->xl_xcvr = XL_XCVR_MII;
1017 		break;
1018 	default:
1019 		printf("xl%d: unknown device ID: %x -- "
1020 			"defaulting to 10baseT\n", sc->xl_unit, devid);
1021 		sc->xl_media = XL_MEDIAOPT_BT;
1022 		break;
1023 	}
1024 
1025 	return;
1026 }
1027 
1028 /*
1029  * Initialize the transmit descriptors.
1030  */
1031 int xl_list_tx_init(sc)
1032 	struct xl_softc		*sc;
1033 {
1034 	struct xl_chain_data	*cd;
1035 	struct xl_list_data	*ld;
1036 	int			i;
1037 
1038 	cd = &sc->xl_cdata;
1039 	ld = sc->xl_ldata;
1040 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1041 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1042 		if (i == (XL_TX_LIST_CNT - 1))
1043 			cd->xl_tx_chain[i].xl_next = NULL;
1044 		else
1045 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1046 	}
1047 
1048 	cd->xl_tx_free = &cd->xl_tx_chain[0];
1049 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1050 
1051 	return(0);
1052 }
1053 
1054 /*
1055  * Initialize the transmit descriptors (3c905B version).
1056  */
1057 int
1058 xl_list_tx_init_90xB(sc)
1059 	struct xl_softc *sc;
1060 {
1061 	struct xl_chain_data *cd;
1062 	struct xl_list_data *ld;
1063 	int i;
1064 
1065 	cd = &sc->xl_cdata;
1066 	ld = sc->xl_ldata;
1067 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1068 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1069 		cd->xl_tx_chain[i].xl_phys = vtophys(&ld->xl_tx_list[i]);
1070 		if (i == (XL_TX_LIST_CNT - 1))
1071 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1072 		else
1073 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1074 		if (i == 0)
1075 			cd->xl_tx_chain[i].xl_prev =
1076 			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1077 		else
1078 			cd->xl_tx_chain[i].xl_prev =
1079 			    &cd->xl_tx_chain[i - 1];
1080 	}
1081 
1082 	bzero((char *)ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1083 	ld->xl_tx_list[0].xl_status = XL_TXSTAT_EMPTY;
1084 
1085 	cd->xl_tx_prod = 1;
1086 	cd->xl_tx_cons = 1;
1087 	cd->xl_tx_cnt = 0;
1088 
1089 	return (0);
1090 }
1091 
1092 /*
1093  * Initialize the RX descriptors and allocate mbufs for them. Note that
1094  * we arrange the descriptors in a closed ring, so that the last descriptor
1095  * points back to the first.
1096  */
1097 int xl_list_rx_init(sc)
1098 	struct xl_softc		*sc;
1099 {
1100 	struct xl_chain_data	*cd;
1101 	struct xl_list_data	*ld;
1102 	int			i;
1103 
1104 	cd = &sc->xl_cdata;
1105 	ld = sc->xl_ldata;
1106 
1107 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1108 		cd->xl_rx_chain[i].xl_ptr =
1109 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1110 		if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
1111 			return(ENOBUFS);
1112 		if (i == (XL_RX_LIST_CNT - 1)) {
1113 			cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0];
1114 			ld->xl_rx_list[i].xl_next =
1115 			    vtophys(&ld->xl_rx_list[0]);
1116 		} else {
1117 			cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1];
1118 			ld->xl_rx_list[i].xl_next =
1119 			    vtophys(&ld->xl_rx_list[i + 1]);
1120 		}
1121 	}
1122 
1123 	cd->xl_rx_head = &cd->xl_rx_chain[0];
1124 
1125 	return(0);
1126 }
1127 
1128 /*
1129  * Initialize an RX descriptor and attach an MBUF cluster.
1130  */
1131 int xl_newbuf(sc, c)
1132 	struct xl_softc		*sc;
1133 	struct xl_chain_onefrag	*c;
1134 {
1135 	struct mbuf		*m_new = NULL;
1136 
1137 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1138 	if (m_new == NULL)
1139 		return(ENOBUFS);
1140 
1141 	MCLGET(m_new, M_DONTWAIT);
1142 	if (!(m_new->m_flags & M_EXT)) {
1143 		m_freem(m_new);
1144 		return(ENOBUFS);
1145 	}
1146 
1147 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1148 
1149 	/* Force longword alignment for packet payload. */
1150 	m_adj(m_new, ETHER_ALIGN);
1151 
1152 	c->xl_mbuf = m_new;
1153 	c->xl_ptr->xl_frag.xl_addr = vtophys(mtod(m_new, caddr_t));
1154 	c->xl_ptr->xl_frag.xl_len = MCLBYTES | XL_LAST_FRAG;
1155 	c->xl_ptr->xl_status = 0;
1156 
1157 	return(0);
1158 }
1159 
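/*
 * Scan the RX ring for a descriptor the chip has completed but that we
 * have not processed yet (a sign we have fallen out of step, e.g. after
 * a missed interrupt).  If one is found, resync the head pointer to it
 * and return EAGAIN so the caller knows to run xl_rxeof() again.
 */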
1160 int xl_rx_resync(sc)
1161 	struct xl_softc *sc;
1162 {
1163 	struct xl_chain_onefrag *pos;
1164 	int i;
1165 
1166 	pos = sc->xl_cdata.xl_rx_head;
1167 
1168 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1169 		if (pos->xl_ptr->xl_status)
1170 			break;
1171 		pos = pos->xl_next;
1172 	}
1173 
1174 	if (i == XL_RX_LIST_CNT)
1175 		return (0);
1176 
1177 	sc->xl_cdata.xl_rx_head = pos;
1178 
1179 	return (EAGAIN);
1180 }
1181 
1182 /*
1183  * A frame has been uploaded: pass the resulting mbuf chain up to
1184  * the higher level protocols.
1185  */
1186 void xl_rxeof(sc)
1187 	struct xl_softc		*sc;
1188 {
1189         struct mbuf		*m;
1190         struct ifnet		*ifp;
1191 	struct xl_chain_onefrag	*cur_rx;
1192 	int			total_len = 0;
1193 	u_int16_t		rxstat;
1194 
1195 	ifp = &sc->arpcom.ac_if;
1196 
1197 again:
1198 
1199 	while((rxstat = sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) {
1200 		cur_rx = sc->xl_cdata.xl_rx_head;
1201 		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1202 
1203 		/*
1204 		 * If an error occurs, update stats, clear the
1205 		 * status word and leave the mbuf cluster in place:
1206 		 * it should simply get re-used next time this descriptor
1207 	 	 * comes up in the ring.
1208 		 */
1209 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1210 			ifp->if_ierrors++;
1211 			cur_rx->xl_ptr->xl_status = 0;
1212 			continue;
1213 		}
1214 
1215 		/*
1216 		 * If the error bit was not set, the upload complete
1217 		 * bit should be set, which means we have a valid packet.
1218 		 * If not, something truly strange has happened.
1219 		 */
1220 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1221 			printf("xl%d: bad receive status -- "
1222 			    "packet dropped", sc->xl_unit);
1223 			ifp->if_ierrors++;
1224 			cur_rx->xl_ptr->xl_status = 0;
1225 			continue;
1226 		}
1227 
1228 		/* No errors; receive the packet. */
1229 		m = cur_rx->xl_mbuf;
1230 		total_len = cur_rx->xl_ptr->xl_status & XL_RXSTAT_LENMASK;
1231 
1232 		/*
1233 		 * Try to conjure up a new mbuf cluster. If that
1234 		 * fails, it means we have an out of memory condition and
1235 		 * should leave the buffer in place and continue. This will
1236 		 * result in a lost packet, but there's little else we
1237 		 * can do in this situation.
1238 		 */
1239 		if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
1240 			ifp->if_ierrors++;
1241 			cur_rx->xl_ptr->xl_status = 0;
1242 			continue;
1243 		}
1244 
1245 		ifp->if_ipackets++;
1246 		m->m_pkthdr.rcvif = ifp;
1247 		m->m_pkthdr.len = m->m_len = total_len;
1248 #if NBPFILTER > 0
1249 		/*
1250 		 * Handle BPF listeners. Let the BPF user see the packet.
1251 		 */
1252 		if (ifp->if_bpf) {
1253 			bpf_mtap(ifp->if_bpf, m);
1254 		}
1255 #endif
1256 		ether_input_mbuf(ifp, m);
1257 	}
1258 
1259 	/*
1260 	 * Handle the 'end of channel' condition. When the upload
1261 	 * engine hits the end of the RX ring, it will stall. This
1262 	 * is our cue to flush the RX ring, reload the uplist pointer
1263 	 * register and unstall the engine.
1264 	 * XXX This is actually a little goofy. With the ThunderLAN
1265 	 * chip, you get an interrupt when the receiver hits the end
1266 	 * of the receive ring, which tells you exactly when you
1267 	 * need to reload the ring pointer. Here we have to
1268 	 * fake it. I'm mad at myself for not being clever enough
1269 	 * to avoid the use of a goto here.
1270 	 */
1271 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1272 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1273 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1274 		xl_wait(sc);
1275 		CSR_WRITE_4(sc, XL_UPLIST_PTR,
1276 			vtophys(&sc->xl_ldata->xl_rx_list[0]));
1277 		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1278 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1279 		goto again;
1280 	}
1281 
1282 	return;
1283 }
1284 
1285 /*
1286  * A frame was downloaded to the chip. It's safe for us to clean up
1287  * the list buffers.
1288  */
1289 void xl_txeof(sc)
1290 	struct xl_softc		*sc;
1291 {
1292 	struct xl_chain		*cur_tx;
1293 	struct ifnet		*ifp;
1294 
1295 	ifp = &sc->arpcom.ac_if;
1296 
1297 	/* Clear the timeout timer. */
1298 	ifp->if_timer = 0;
1299 
1300 	/*
1301 	 * Go through our tx list and free mbufs for those
1302 	 * frames that have been downloaded. Note: the 3c905B
1303 	 * sets a special bit in the status word to let us
1304 	 * know that a frame has been downloaded, but the
1305 	 * original 3c900/3c905 adapters don't do that.
1306 	 * Consequently, we have to use a different test if
1307 	 * xl_type != XL_TYPE_905B.
1308 	 */
1309 	while(sc->xl_cdata.xl_tx_head != NULL) {
1310 		cur_tx = sc->xl_cdata.xl_tx_head;
1311 
1312 		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1313 			break;
1314 
1315 		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1316 		m_freem(cur_tx->xl_mbuf);
1317 		cur_tx->xl_mbuf = NULL;
1318 		ifp->if_opackets++;
1319 
1320 		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1321 		sc->xl_cdata.xl_tx_free = cur_tx;
1322 	}
1323 
1324 	if (sc->xl_cdata.xl_tx_head == NULL) {
1325 		ifp->if_flags &= ~IFF_OACTIVE;
1326 		sc->xl_cdata.xl_tx_tail = NULL;
1327 	} else {
1328 		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1329 			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1330 			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1331 				vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
1332 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1333 		}
1334 	}
1335 
1336 	return;
1337 }
1338 
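/*
 * 3c905B version of xl_txeof().  The 90xB descriptors carry a 'download
 * complete' bit, so we simply walk the ring from the consumer index and
 * reclaim every descriptor the chip has finished with.
 */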
1339 void
1340 xl_txeof_90xB(sc)
1341 	struct xl_softc *sc;
1342 {
1343 	struct xl_chain *cur_tx = NULL;
1344 	struct ifnet *ifp;
1345 	int idx;
1346 
1347 	ifp = &sc->arpcom.ac_if;
1348 
1349 	idx = sc->xl_cdata.xl_tx_cons;
1350 	while(idx != sc->xl_cdata.xl_tx_prod) {
1351 
1352 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1353 
1354 		if (!(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE))
1355 			break;
1356 
1357 		if (cur_tx->xl_mbuf != NULL) {
1358 			m_freem(cur_tx->xl_mbuf);
1359 			cur_tx->xl_mbuf = NULL;
1360 		}
1361 
1362 		ifp->if_opackets++;
1363 
1364 		sc->xl_cdata.xl_tx_cnt--;
1365 		XL_INC(idx, XL_TX_LIST_CNT);
1366 		ifp->if_timer = 0;
1367 	}
1368 
1369 	sc->xl_cdata.xl_tx_cons = idx;
1370 
1371 	if (cur_tx != NULL)
1372 		ifp->if_flags &= ~IFF_OACTIVE;
1373 }
1374 
1375 /*
1376  * TX 'end of channel' interrupt handler. Actually, we should
1377  * only get a 'TX complete' interrupt if there's a transmit error,
1378  * so this is really the TX error handler.
1379  */
1380 void xl_txeoc(sc)
1381 	struct xl_softc		*sc;
1382 {
1383 	u_int8_t		txstat;
1384 
1385 	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1386 		if (txstat & XL_TXSTATUS_UNDERRUN ||
1387 			txstat & XL_TXSTATUS_JABBER ||
1388 			txstat & XL_TXSTATUS_RECLAIM) {
1389 			if (txstat != 0x90) {
1390 				printf("xl%d: transmission error: %x\n",
1391 				    sc->xl_unit, txstat);
1392 			}
1393 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1394 			xl_wait(sc);
1395 			if (sc->xl_type == XL_TYPE_905B) {
1396 				int i;
1397 				struct xl_chain *c;
1398 				i = sc->xl_cdata.xl_tx_cons;
1399 				c = &sc->xl_cdata.xl_tx_chain[i];
1400 				CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys);
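				/* re-enable download polling (see xl_init()) */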
1401 				CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1402 			} else {
1403 				if (sc->xl_cdata.xl_tx_head != NULL)
1404 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1405 					    vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
1406 			}
1407 			/*
1408 			 * Remember to set this for the
1409 			 * first generation 3c90X chips.
1410 			 */
1411 			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1412 			if (txstat & XL_TXSTATUS_UNDERRUN &&
1413 			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
1414 				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1415 #ifdef notdef
1416 				printf("xl%d: tx underrun, increasing tx start"
1417 				    " threshold to %d\n", sc->xl_unit,
1418 				    sc->xl_tx_thresh);
1419 #endif
1420 			}
1421 			CSR_WRITE_2(sc, XL_COMMAND,
1422 			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1423 			if (sc->xl_type == XL_TYPE_905B) {
1424 				CSR_WRITE_2(sc, XL_COMMAND,
1425 				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1426 			}
1427 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1428 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1429 		} else {
1430 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1431 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1432 		}
1433 		/*
1434 		 * Write an arbitrary byte to the TX_STATUS register
1435 	 	 * to clear this interrupt/error and advance to the next.
1436 		 */
1437 		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1438 	}
1439 
1440 	return;
1441 }
1442 
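/*
 * Interrupt handler.  Loop while any of the interrupt sources we care
 * about are asserted, acknowledging and servicing each in turn; returns
 * non-zero if the interrupt was ours.
 */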
1443 int xl_intr(arg)
1444 	void			*arg;
1445 {
1446 	struct xl_softc		*sc;
1447 	struct ifnet		*ifp;
1448 	u_int16_t		status;
1449 	int claimed = 0;
1450 
1451 	sc = arg;
1452 	ifp = &sc->arpcom.ac_if;
1453 
1454 	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS) {
1455 
1456 		claimed = 1;
1457 
1458 		CSR_WRITE_2(sc, XL_COMMAND,
1459 		    XL_CMD_INTR_ACK|(status & XL_INTRS));
1460 
1461 		if (sc->intr_ack)
1462 			(*sc->intr_ack)(sc);
1463 
1464 		if (status & XL_STAT_UP_COMPLETE) {
1465 			int curpkts;
1466 
1467 			curpkts = ifp->if_ipackets;
1468 			xl_rxeof(sc);
1469 			if (curpkts == ifp->if_ipackets) {
1470 				while (xl_rx_resync(sc))
1471 					xl_rxeof(sc);
1472 			}
1473 		}
1474 
1475 		if (status & XL_STAT_DOWN_COMPLETE) {
1476 			if (sc->xl_type == XL_TYPE_905B)
1477 				xl_txeof_90xB(sc);
1478 			else
1479 				xl_txeof(sc);
1480 		}
1481 
1482 		if (status & XL_STAT_TX_COMPLETE) {
1483 			ifp->if_oerrors++;
1484 			xl_txeoc(sc);
1485 		}
1486 
1487 		if (status & XL_STAT_ADFAIL) {
1488 			xl_reset(sc, 0);
1489 			xl_init(sc);
1490 		}
1491 
1492 		if (status & XL_STAT_STATSOFLOW) {
1493 			sc->xl_stats_no_timeout = 1;
1494 			xl_stats_update(sc);
1495 			sc->xl_stats_no_timeout = 0;
1496 		}
1497 	}
1498 
1499 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1500 		(*ifp->if_start)(ifp);
1501 
1502 	return (claimed);
1503 }
1504 
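/*
 * Gather the statistics counters from the chip (they clear when read)
 * and fold them into the interface counters.  Normally reschedules
 * itself via the stats timeout; also run directly when a statistics
 * overflow interrupt is taken.
 */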
1505 void xl_stats_update(xsc)
1506 	void			*xsc;
1507 {
1508 	struct xl_softc		*sc;
1509 	struct ifnet		*ifp;
1510 	struct xl_stats		xl_stats;
1511 	u_int8_t		*p;
1512 	int			i;
1513 	struct mii_data		*mii = NULL;
1514 
1515 	bzero((char *)&xl_stats, sizeof(struct xl_stats));
1516 
1517 	sc = xsc;
1518 	ifp = &sc->arpcom.ac_if;
1519 	if (sc->xl_hasmii)
1520 		mii = &sc->sc_mii;
1521 
1522 	p = (u_int8_t *)&xl_stats;
1523 
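	/*
	 * The counters are copied byte for byte into xl_stats, so the
	 * struct layout has to mirror the window 6 register layout.
	 */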
1524 	/* Read all the stats registers. */
1525 	XL_SEL_WIN(6);
1526 
1527 	for (i = 0; i < 16; i++)
1528 		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
1529 
1530 	ifp->if_ierrors += xl_stats.xl_rx_overrun;
1531 
1532 	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
1533 				xl_stats.xl_tx_single_collision +
1534 				xl_stats.xl_tx_late_collision;
1535 
1536 	/*
1537 	 * Boomerang and cyclone chips have an extra stats counter
1538 	 * in window 4 (BadSSD). We have to read this too in order
1539 	 * to clear out all the stats registers and avoid a statsoflow
1540 	 * interrupt.
1541 	 */
1542 	XL_SEL_WIN(4);
1543 	CSR_READ_1(sc, XL_W4_BADSSD);
1544 
1545 	if (mii != NULL)
1546 		mii_tick(mii);
1547 
1548 	XL_SEL_WIN(7);
1549 
1550 	if (!sc->xl_stats_no_timeout)
1551 		timeout_add(&sc->xl_stsup_tmo, hz);
1552 
1553 	return;
1554 }
1555 
1556 /*
1557  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1558  * pointers to the fragment pointers.
1559  */
1560 int xl_encap(sc, c, m_head)
1561 	struct xl_softc		*sc;
1562 	struct xl_chain		*c;
1563 	struct mbuf		*m_head;
1564 {
1565 	int			frag = 0;
1566 	struct xl_frag		*f = NULL;
1567 	int			total_len;
1568 	struct mbuf		*m;
1569 
1570 	/*
1571  	 * Start packing the mbufs in this chain into
1572 	 * the fragment pointers. Stop when we run out
1573  	 * of fragments or hit the end of the mbuf chain.
1574 	 */
1575 	m = m_head;
1576 	total_len = 0;
1577 
1578 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1579 		if (m->m_len != 0) {
1580 			if (frag == XL_MAXFRAGS)
1581 				break;
1582 			total_len+= m->m_len;
1583 			c->xl_ptr->xl_frag[frag].xl_addr =
1584 					vtophys(mtod(m, vm_offset_t));
1585 			c->xl_ptr->xl_frag[frag].xl_len = m->m_len;
1586 			frag++;
1587 		}
1588 	}
1589 
1590 	/*
1591 	 * Handle special case: we used up all 63 fragments,
1592 	 * but we have more mbufs left in the chain. Copy the
1593 	 * data into an mbuf cluster. Note that we don't
1594 	 * bother clearing the values in the other fragment
1595 	 * pointers/counters; it wouldn't gain us anything,
1596 	 * and would waste cycles.
1597 	 */
1598 	if (m != NULL) {
1599 		struct mbuf		*m_new = NULL;
1600 
1601 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1602 		if (m_new == NULL)
1603 			return(1);
1604 		if (m_head->m_pkthdr.len > MHLEN) {
1605 			MCLGET(m_new, M_DONTWAIT);
1606 			if (!(m_new->m_flags & M_EXT)) {
1607 				m_freem(m_new);
1608 				return(1);
1609 			}
1610 		}
1611 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1612 					mtod(m_new, caddr_t));
1613 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1614 		m_freem(m_head);
1615 		m_head = m_new;
1616 		f = &c->xl_ptr->xl_frag[0];
1617 		f->xl_addr = vtophys(mtod(m_new, caddr_t));
1618 		f->xl_len = total_len = m_new->m_len;
1619 		frag = 1;
1620 	}
1621 
1622 	c->xl_mbuf = m_head;
1623 	c->xl_ptr->xl_frag[frag - 1].xl_len |=  XL_LAST_FRAG;
1624 	c->xl_ptr->xl_status = total_len;
1625 	c->xl_ptr->xl_next = 0;
1626 
1627 	return(0);
1628 }
1629 
1630 /*
1631  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1632  * to the mbuf data regions directly in the transmit lists. We also save a
1633  * copy of the pointers since the transmit list fragment pointers are
1634  * physical addresses.
1635  */
1636 void xl_start(ifp)
1637 	struct ifnet		*ifp;
1638 {
1639 	struct xl_softc		*sc;
1640 	struct mbuf		*m_head = NULL;
1641 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1642 
1643 	sc = ifp->if_softc;
1644 
1645 	/*
1646 	 * Check for an available queue slot. If there are none,
1647 	 * punt.
1648 	 */
1649 	if (sc->xl_cdata.xl_tx_free == NULL) {
1650 		xl_txeoc(sc);
1651 		xl_txeof(sc);
1652 		if (sc->xl_cdata.xl_tx_free == NULL) {
1653 			ifp->if_flags |= IFF_OACTIVE;
1654 			return;
1655 		}
1656 	}
1657 
1658 	start_tx = sc->xl_cdata.xl_tx_free;
1659 
1660 	while(sc->xl_cdata.xl_tx_free != NULL) {
1661 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1662 		if (m_head == NULL)
1663 			break;
1664 
1665 		/* Pick a descriptor off the free list. */
1666 		cur_tx = sc->xl_cdata.xl_tx_free;
1667 		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1668 
1669 		cur_tx->xl_next = NULL;
1670 
1671 		/* Pack the data into the descriptor. */
1672 		xl_encap(sc, cur_tx, m_head);
1673 
1674 		/* Chain it together. */
1675 		if (prev != NULL) {
1676 			prev->xl_next = cur_tx;
1677 			prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr);
1678 		}
1679 		prev = cur_tx;
1680 
1681 #if NBPFILTER > 0
1682 		/*
1683 		 * If there's a BPF listener, bounce a copy of this frame
1684 		 * to him.
1685 		 */
1686 		if (ifp->if_bpf)
1687 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf);
1688 #endif
1689 	}
1690 
1691 	/*
1692 	 * If there are no packets queued, bail.
1693 	 */
1694 	if (cur_tx == NULL)
1695 		return;
1696 
1697 	/*
1698 	 * Place the request for the download interrupt
1699 	 * in the last descriptor in the chain. This way, if
1700 	 * we're chaining several packets at once, we'll only
1701 	 * get an interrupt once for the whole chain rather than
1702 	 * once for each packet.
1703 	 */
1704 	cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;
1705 
1706 	/*
1707 	 * Queue the packets. If the TX channel is clear, update
1708 	 * the downlist pointer register.
1709 	 */
1710 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1711 	xl_wait(sc);
1712 
1713 	if (sc->xl_cdata.xl_tx_head != NULL) {
1714 		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1715 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1716 					vtophys(start_tx->xl_ptr);
1717 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1718 					~XL_TXSTAT_DL_INTR;
1719 		sc->xl_cdata.xl_tx_tail = cur_tx;
1720 	} else {
1721 		sc->xl_cdata.xl_tx_head = start_tx;
1722 		sc->xl_cdata.xl_tx_tail = cur_tx;
1723 	}
1724 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1725 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr));
1726 
1727 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1728 
1729 	XL_SEL_WIN(7);
1730 
1731 	/*
1732 	 * Set a timeout in case the chip goes out to lunch.
1733 	 */
1734 	ifp->if_timer = 5;
1735 
1736 	/*
1737 	 * XXX Under certain conditions, usually on slower machines
1738 	 * where interrupts may be dropped, it's possible for the
1739 	 * adapter to chew up all the buffers in the receive ring
1740 	 * and stall, without us being able to do anything about it.
1741 	 * To guard against this, we need to make a pass over the
1742 	 * RX queue to make sure there aren't any packets pending.
1743 	 * Doing it here means we can flush the receive ring at the
1744 	 * same time the chip is DMAing the transmit descriptors we
1745 	 * just gave it.
1746  	 *
1747 	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1748 	 * nature of their chips in all their marketing literature;
1749 	 * we may as well take advantage of it. :)
1750 	 */
1751 	xl_rxeof(sc);
1752 
1753 	return;
1754 }
1755 
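/*
 * 3c905B version of xl_encap().  The fragment packing is the same, but
 * the 90xB descriptors live in a fixed ring and apparently don't need
 * the packet length stored in the status word the way older chips do.
 */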
1756 int xl_encap_90xB(sc, c, m_head)
1757 	struct xl_softc *sc;
1758 	struct xl_chain *c;
1759 	struct mbuf *m_head;
1760 {
1761 	int frag = 0;
1762 	struct xl_frag *f = NULL;
1763 	struct mbuf *m;
1764 	struct xl_list *d;
1765 
1766 	/*
1767 	 * Start packing the mbufs in this chain into
1768 	 * the fragment pointers. Stop when we run out
1769 	 * of fragments or hit the end of the mbuf chain.
1770 	 */
1771 	d = c->xl_ptr;
1772 	d->xl_status = 0;
1773 	d->xl_next = 0;
1774 
1775 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1776 		if (m->m_len != 0) {
1777 			if (frag == XL_MAXFRAGS)
1778 				break;
1779 			f = &d->xl_frag[frag];
1780 			f->xl_addr = vtophys(mtod(m, vm_offset_t));
1781 			f->xl_len = m->m_len;
1782 			frag++;
1783 		}
1784 	}
1785 
1786 	c->xl_mbuf = m_head;
1787 	c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG;
1788 	c->xl_ptr->xl_status = XL_TXSTAT_RND_DEFEAT;
1789 
1790 	return(0);
1791 }
1792 
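/*
 * 3c905B transmit routine.  The descriptors form a static ring, so
 * rather than pulling buffers off a free list we just advance the
 * producer index and let the chip's download polling pick them up.
 */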
1793 void
1794 xl_start_90xB(ifp)
1795 	struct ifnet *ifp;
1796 {
1797 	struct xl_softc *sc;
1798 	struct mbuf *m_head = NULL;
1799 	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
1800 	int idx;
1801 
1802 	sc = ifp->if_softc;
1803 
1804 	if (ifp->if_flags & IFF_OACTIVE)
1805 		return;
1806 
1807 	idx = sc->xl_cdata.xl_tx_prod;
1808 	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
1809 
1810 	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
1811 
1812 		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
1813 			ifp->if_flags |= IFF_OACTIVE;
1814 			break;
1815 		}
1816 
1817 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1818 		if (m_head == NULL)
1819 			break;
1820 
1821 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1822 
1823 		/* Pack the data into the descriptor. */
1824 		xl_encap_90xB(sc, cur_tx, m_head);
1825 
1826 		/* Chain it together. */
1827 		if (prev != NULL)
1828 			prev->xl_ptr->xl_next = cur_tx->xl_phys;
1829 		prev = cur_tx;
1830 
1831 #if NBPFILTER > 0
1832 		/*
1833 		 * If there's a BPF listener, bounce a copy of this frame
1834 		 * to him.
1835 		 */
1836 		if (ifp->if_bpf)
1837 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf);
1838 #endif
1839 
1840 		XL_INC(idx, XL_TX_LIST_CNT);
1841 		sc->xl_cdata.xl_tx_cnt++;
1842 	}
1843 
1844 	/*
1845 	 * If there are no packets queued, bail.
1846 	 */
1847 	if (cur_tx == NULL)
1848 		return;
1849 
1850 	/*
1851 	 * Place the request for the download interrupt
1852 	 * in the last descriptor in the chain. This way, if
1853 	 * we're chaining several packets at once, we'll only
1854 	 * get an interrupt once for the whole chain rather than
1855 	 * once for each packet.
1856 	 */
1857 	cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;
1858 
1859 	/* Start transmission */
1860 	sc->xl_cdata.xl_tx_prod = idx;
1861 	start_tx->xl_prev->xl_ptr->xl_next = start_tx->xl_phys;
1862 
1863 	/*
1864 	 * Set a timeout in case the chip goes out to lunch.
1865 	 */
1866 	ifp->if_timer = 5;
1867 }
1868 
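/*
 * (Re)initialize the adapter: reset the TX/RX engines, program the
 * station address and RX filter, and set up the RX and TX descriptor
 * rings.
 */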
1869 void xl_init(xsc)
1870 	void			*xsc;
1871 {
1872 	struct xl_softc		*sc = xsc;
1873 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1874 	int			s, i;
1875 	u_int16_t		rxfilt = 0;
1876 	struct mii_data		*mii = NULL;
1877 
1878 	s = splimp();
1879 
1880 	/*
1881 	 * Cancel pending I/O and free all RX/TX buffers.
1882 	 */
1883 	xl_stop(sc);
1884 
1885 	if (sc->xl_hasmii)
1886 		mii = &sc->sc_mii;
1887 
1888 	if (mii == NULL) {
1889 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1890 		xl_wait(sc);
1891 	}
1892 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1893 	xl_wait(sc);
1894 	DELAY(10000);
1895 
1896 
1897 	/* Init our MAC address */
1898 	XL_SEL_WIN(2);
1899 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1900 		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
1901 				sc->arpcom.ac_enaddr[i]);
1902 	}
1903 
1904 	/* Clear the station mask. */
1905 	for (i = 0; i < 3; i++)
1906 		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
1907 #ifdef notdef
1908 	/* Reset TX and RX. */
1909 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1910 	xl_wait(sc);
1911 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1912 	xl_wait(sc);
1913 #endif
1914 	/* Init circular RX list. */
1915 	if (xl_list_rx_init(sc) == ENOBUFS) {
1916 		printf("xl%d: initialization failed: no "
1917 			"memory for rx buffers\n", sc->xl_unit);
1918 		xl_stop(sc);
1919 		splx(s);
1920 		return;
1921 	}
1922 
1923 	/* Init TX descriptors. */
1924 	if (sc->xl_type == XL_TYPE_905B)
1925 		xl_list_tx_init_90xB(sc);
1926 	else
1927 		xl_list_tx_init(sc);
1928 
1929 	/*
1930 	 * Set the TX freethresh value.
1931 	 * Note that this has no effect on 3c905B "cyclone"
1932 	 * cards but is required for 3c900/3c905 "boomerang"
1933 	 * cards in order to enable the download engine.
1934 	 */
1935 	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1936 
1937 	/* Set the TX start threshold for best performance. */
1938 	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
1939 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1940 
1941 	/*
1942 	 * If this is a 3c905B, also set the tx reclaim threshold.
1943 	 * This helps cut down on the number of tx reclaim errors
1944 	 * that could happen on a busy network. The chip multiplies
1945 	 * the register value by 16 to obtain the actual threshold
1946 	 * in bytes, so we divide by 16 when setting the value here.
1947 	 * The existing threshold value can be examined by reading
1948 	 * the register at offset 9 in window 5.
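	 * For example, assuming XL_PACKET_SIZE is 1540, we write
	 * 1540 >> 4 == 96, which the chip treats as 96 * 16 == 1536 bytes.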
1949 	 */
1950 	if (sc->xl_type == XL_TYPE_905B) {
1951 		CSR_WRITE_2(sc, XL_COMMAND,
1952 		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1953 	}
1954 
1955 	/* Set RX filter bits. */
1956 	XL_SEL_WIN(5);
1957 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
1958 
1959 	/* Set the individual bit to receive frames for this host only. */
1960 	rxfilt |= XL_RXFILTER_INDIVIDUAL;
1961 
1962 	/* If we want promiscuous mode, set the allframes bit. */
1963 	if (ifp->if_flags & IFF_PROMISC) {
1964 		rxfilt |= XL_RXFILTER_ALLFRAMES;
1965 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
1966 	} else {
1967 		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
1968 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
1969 	}
1970 
1971 	/*
1972 	 * Set capture broadcast bit to capture broadcast frames.
1973 	 */
1974 	if (ifp->if_flags & IFF_BROADCAST) {
1975 		rxfilt |= XL_RXFILTER_BROADCAST;
1976 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
1977 	} else {
1978 		rxfilt &= ~XL_RXFILTER_BROADCAST;
1979 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
1980 	}
1981 
1982 	/*
1983 	 * Program the multicast filter, if necessary.
1984 	 */
1985 #if 0
1986 	if (sc->xl_type == XL_TYPE_905B)
1987 #else
1988 	if (0)	/* xl_setmulti_hash() does not work right */
1989 #endif
1990 		xl_setmulti_hash(sc);
1991 	else
1992 		xl_setmulti(sc);
1993 
1994 	/*
1995 	 * Load the address of the RX list. We have to
1996 	 * stall the upload engine before we can manipulate
1997 	 * the uplist pointer register, then unstall it when
1998 	 * we're finished. We also have to wait for the
1999 	 * stall command to complete before proceeding.
2000 	 * Note that we have to do this after any RX resets
2001 	 * have completed since the uplist register is cleared
2002 	 * by a reset.
2003 	 */
2004 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2005 	xl_wait(sc);
2006 	CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0]));
2007 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2008 	xl_wait(sc);
2009 
2010 	if (sc->xl_type == XL_TYPE_905B) {
2011 		/* Set polling interval */
2012 		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
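		/*
		 * With download polling enabled, the chip rechecks the
		 * TX list on its own, so xl_start_90xB() only needs to
		 * link new descriptors onto the chain.
		 */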
2013 		/* Load the address of the TX list */
2014 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2015 		xl_wait(sc);
2016 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2017 		    vtophys(&sc->xl_ldata->xl_tx_list[0]));
2018 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2019 		xl_wait(sc);
2020 	}
2021 
2022 	/*
2023 	 * If the coax transceiver is on, make sure to enable
2024 	 * the DC-DC converter.
2025 	 */
2026 	XL_SEL_WIN(3);
2027 	if (sc->xl_xcvr == XL_XCVR_COAX)
2028 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2029 	else
2030 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2031 
2032 #if NVLAN > 0
2033 	/* Set max packet size to handle VLAN frames, only on 3c905B */
2034 	if (sc->xl_type == XL_TYPE_905B)
2035 		CSR_WRITE_2(sc, XL_W3_MAX_PKT_SIZE, 1514 + 4);
2036 #endif
2037 
2038 	/* Clear out the stats counters. */
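	/*
	 * The stats registers clear on read, so one manual
	 * xl_stats_update() pass (with the timeout suppressed) zeroes
	 * them before they are re-enabled below.
	 */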
2039 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2040 	sc->xl_stats_no_timeout = 1;
2041 	xl_stats_update(sc);
2042 	sc->xl_stats_no_timeout = 0;
2043 	XL_SEL_WIN(4);
2044 	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2045 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2046 
2047 	/*
2048 	 * Enable interrupts.
2049 	 */
2050 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2051 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2052 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2053 
2054 	if (sc->intr_ack)
2055 		(*sc->intr_ack)(sc);
2056 
2057 	/* Set the RX early threshold */
2058 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2059 	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2060 
2061 	/* Enable receiver and transmitter. */
2062 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2063 	xl_wait(sc);
2064 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2065 	xl_wait(sc);
2066 
2067 	/* Restore state of BMCR */
2068 	if (mii != NULL)
2069 		mii_mediachg(mii);
2070 
2071 	/* Select window 7 for normal operations. */
2072 	XL_SEL_WIN(7);
2073 
2074 	ifp->if_flags |= IFF_RUNNING;
2075 	ifp->if_flags &= ~IFF_OACTIVE;
2076 
2077 	(void)splx(s);
2078 
2079 	timeout_add(&sc->xl_stsup_tmo, hz);
2080 
2081 	return;
2082 }
2083 
2084 /*
2085  * Set media options.
2086  */
2087 int xl_ifmedia_upd(ifp)
2088 	struct ifnet		*ifp;
2089 {
2090 	struct xl_softc		*sc;
2091 	struct ifmedia		*ifm = NULL;
2092 	struct mii_data		*mii = NULL;
2093 
2094 	sc = ifp->if_softc;
2095 
2096 	if (sc->xl_hasmii)
2097 		mii = &sc->sc_mii;
2098 	if (mii == NULL)
2099 		ifm = &sc->ifmedia;
2100 	else
2101 		ifm = &mii->mii_media;
2102 
2103 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2104 	case IFM_100_FX:
2105 	case IFM_10_FL:
2106 	case IFM_10_2:
2107 	case IFM_10_5:
2108 		xl_setmode(sc, ifm->ifm_media);
2109 		return (0);
2110 		break;
2111 	default:
2112 		break;
2113 	}
2114 
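	/*
	 * Media handled by the MII layer (MII, 100baseTX, 100baseT4) are
	 * reprogrammed through a full reinit, which ends in mii_mediachg();
	 * everything else is set directly via xl_setmode().
	 */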
2115 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2116 		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2117 		xl_init(sc);
2118 	} else {
2119 		xl_setmode(sc, ifm->ifm_media);
2120 	}
2121 
2122 	return(0);
2123 }
2124 
2125 /*
2126  * Report current media status.
2127  */
2128 void xl_ifmedia_sts(ifp, ifmr)
2129 	struct ifnet		*ifp;
2130 	struct ifmediareq	*ifmr;
2131 {
2132 	struct xl_softc		*sc;
2133 	u_int32_t		icfg;
2134 	struct mii_data		*mii = NULL;
2135 
2136 	sc = ifp->if_softc;
2137 	if (sc->xl_hasmii != 0)
2138 		mii = &sc->sc_mii;
2139 
2140 	XL_SEL_WIN(3);
2141 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2142 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2143 
2144 	ifmr->ifm_active = IFM_ETHER;
2145 
2146 	switch(icfg) {
2147 	case XL_XCVR_10BT:
2148 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2149 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2150 			ifmr->ifm_active |= IFM_FDX;
2151 		else
2152 			ifmr->ifm_active |= IFM_HDX;
2153 		break;
2154 	case XL_XCVR_AUI:
2155 		if (sc->xl_type == XL_TYPE_905B &&
2156 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2157 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2158 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2159 				ifmr->ifm_active |= IFM_FDX;
2160 			else
2161 				ifmr->ifm_active |= IFM_HDX;
2162 		} else
2163 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2164 		break;
2165 	case XL_XCVR_COAX:
2166 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2167 		break;
2168 	/*
2169 	 * XXX MII and BTX/AUTO should be separate cases.
2170 	 */
2171 
2172 	case XL_XCVR_100BTX:
2173 	case XL_XCVR_AUTO:
2174 	case XL_XCVR_MII:
2175 		if (mii != NULL) {
2176 			mii_pollstat(mii);
2177 			ifmr->ifm_active = mii->mii_media_active;
2178 			ifmr->ifm_status = mii->mii_media_status;
2179 		}
2180 		break;
2181 	case XL_XCVR_100BFX:
2182 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2183 		break;
2184 	default:
2185 		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
2186 		break;
2187 	}
2188 
2189 	return;
2190 }
2191 
2192 int
2193 xl_ioctl(ifp, command, data)
2194 	struct ifnet *ifp;
2195 	u_long command;
2196 	caddr_t data;
2197 {
2198 	struct xl_softc *sc = ifp->if_softc;
2199 	struct ifreq *ifr = (struct ifreq *)data;
2200 	struct ifaddr *ifa = (struct ifaddr *)data;
2201 	int s, error = 0;
2202 	struct mii_data *mii = NULL;
2203 	u_int8_t rxfilt;
2204 
2205 	s = splimp();
2206 
2207 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
2208 		splx(s);
2209 		return error;
2210 	}
2211 
2212 	switch(command) {
2213 	case SIOCSIFADDR:
2214 		ifp->if_flags |= IFF_UP;
2215 		switch (ifa->ifa_addr->sa_family) {
2216 #ifdef INET
2217 		case AF_INET:
2218 			xl_init(sc);
2219 			arp_ifinit(&sc->arpcom, ifa);
2220 			break;
2221 #endif /* INET */
2222 		default:
2223 			xl_init(sc);
2224 			break;
2225 		}
2226 		break;
2227 
2228 	case SIOCSIFMTU:
2229 		if(ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
2230 			error = EINVAL;
2231 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
2232 			ifp->if_mtu = ifr->ifr_mtu;
2233 		}
2234 		break;
2235 
2236 	case SIOCSIFFLAGS:
2237 		XL_SEL_WIN(5);
2238 		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
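		/*
		 * If only the PROMISC flag changed while the interface is
		 * running, just toggle the RX filter bit instead of doing
		 * a full reinit.
		 */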
2239 		if (ifp->if_flags & IFF_UP) {
2240 			if (ifp->if_flags & IFF_RUNNING &&
2241 			    ifp->if_flags & IFF_PROMISC &&
2242 			    !(sc->xl_if_flags & IFF_PROMISC)) {
2243 				rxfilt |= XL_RXFILTER_ALLFRAMES;
2244 				CSR_WRITE_2(sc, XL_COMMAND,
2245 				    XL_CMD_RX_SET_FILT|rxfilt);
2246 				XL_SEL_WIN(7);
2247 			} else if (ifp->if_flags & IFF_RUNNING &&
2248 			    !(ifp->if_flags & IFF_PROMISC) &&
2249 			    sc->xl_if_flags & IFF_PROMISC) {
2250 				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2251 				CSR_WRITE_2(sc, XL_COMMAND,
2252 				    XL_CMD_RX_SET_FILT|rxfilt);
2253 				XL_SEL_WIN(7);
2254 			} else
2255 				xl_init(sc);
2256 		} else {
2257 			if (ifp->if_flags & IFF_RUNNING)
2258 				xl_stop(sc);
2259 		}
2260 		sc->xl_if_flags = ifp->if_flags;
2261 		error = 0;
2262 		break;
2263 	case SIOCADDMULTI:
2264 	case SIOCDELMULTI:
2265 		error = (command == SIOCADDMULTI) ?
2266 		    ether_addmulti(ifr, &sc->arpcom) :
2267 		    ether_delmulti(ifr, &sc->arpcom);
2268 
2269 		if (error == ENETRESET) {
2270 			/*
2271 			 * Multicast list has changed; set the hardware
2272 			 * filter accordingly.
2273 			 */
2274 #if 0
2275 			if (sc->xl_type == XL_TYPE_905B)
2276 #else
2277 			if (0)	/* xl_setmulti_hash() does not work right */
2278 #endif
2279 				xl_setmulti_hash(sc);
2280 			else
2281 				xl_setmulti(sc);
2282 			error = 0;
2283 		}
2284 		break;
2285 	case SIOCGIFMEDIA:
2286 	case SIOCSIFMEDIA:
2287 		if (sc->xl_hasmii != 0)
2288 			mii = &sc->sc_mii;
2289 		if (mii == NULL)
2290 			error = ifmedia_ioctl(ifp, ifr,
2291 			    &sc->ifmedia, command);
2292 		else
2293 			error = ifmedia_ioctl(ifp, ifr,
2294 			    &mii->mii_media, command);
2295 		break;
2296 	default:
2297 		error = EINVAL;
2298 		break;
2299 	}
2300 
2301 	(void)splx(s);
2302 
2303 	return(error);
2304 }
2305 
2306 void xl_watchdog(ifp)
2307 	struct ifnet		*ifp;
2308 {
2309 	struct xl_softc		*sc;
2310 	u_int16_t		status = 0;
2311 
2312 	sc = ifp->if_softc;
2313 
2314 	ifp->if_oerrors++;
2315 	XL_SEL_WIN(4);
2316 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2317 	printf("xl%d: watchdog timeout\n", sc->xl_unit);
2318 
2319 	if (status & XL_MEDIASTAT_CARRIER)
2320 		printf("xl%d: no carrier - transceiver cable problem?\n",
2321 								sc->xl_unit);
2322 	xl_txeoc(sc);
2323 	xl_txeof(sc);
2324 	xl_rxeof(sc);
2325 	xl_reset(sc, 0);
2326 	xl_init(sc);
2327 
2328 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2329 		(*ifp->if_start)(ifp);
2330 
2331 	return;
2332 }
2333 
2334 void
2335 xl_freetxrx(sc)
2336 	struct xl_softc *sc;
2337 {
2338 	int i;
2339 
2340 	/*
2341 	 * Free data in the RX lists.
2342 	 */
2343 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2344 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2345 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2346 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2347 		}
2348 	}
2349 	bzero((char *)&sc->xl_ldata->xl_rx_list,
2350 		sizeof(sc->xl_ldata->xl_rx_list));
2351 	/*
2352 	 * Free the TX list buffers.
2353 	 */
2354 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2355 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2356 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2357 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2358 		}
2359 	}
2360 	bzero((char *)&sc->xl_ldata->xl_tx_list,
2361 		sizeof(sc->xl_ldata->xl_tx_list));
2362 }
2363 
2364 /*
2365  * Stop the adapter and free any mbufs allocated to the
2366  * RX and TX lists.
2367  */
2368 void xl_stop(sc)
2369 	struct xl_softc *sc;
2370 {
2371 	struct ifnet *ifp;
2372 
2373 	ifp = &sc->arpcom.ac_if;
2374 	ifp->if_timer = 0;
2375 
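	/*
	 * Disable the receiver, transmitter and statistics engine, and
	 * discard any pending RX data before masking interrupts.
	 */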
2376 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2377 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2378 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2379 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2380 	xl_wait(sc);
2381 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2382 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2383 	DELAY(800);
2384 
2385 #ifdef foo
2386 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2387 	xl_wait(sc);
2388 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2389 	xl_wait(sc);
2390 #endif
2391 
2392 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2393 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2394 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2395 
2396 	if (sc->intr_ack)
2397 		(*sc->intr_ack)(sc);
2398 
2399 	/* Stop the stats updater. */
2400 	timeout_del(&sc->xl_stsup_tmo);
2401 
2402 	xl_freetxrx(sc);
2403 
2404 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2405 
2406 	return;
2407 }
2408 
2409 void
2410 xl_attach(sc)
2411 	struct xl_softc *sc;
2412 {
2413 	u_int8_t enaddr[ETHER_ADDR_LEN];
2414 	struct ifnet *ifp = &sc->arpcom.ac_if;
2415 	caddr_t roundptr;
2416 	u_int round;
2417 	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
2418 	struct ifmedia *ifm;
2419 
2420 	sc->xl_unit = sc->sc_dev.dv_unit;
2421 	xl_reset(sc, 1);
2422 
2423 	/*
2424 	 * Get station address from the EEPROM.
2425 	 */
2426 	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
2427 		printf("\n%s: failed to read station address\n",
2428 		    sc->sc_dev.dv_xname);
2429 		return;
2430 	}
2431 	bcopy(enaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
2432 
2433 	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
2434 
2435 	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
2436 		u_int16_t n;
2437 
2438 		XL_SEL_WIN(2);
2439 		n = CSR_READ_2(sc, 12);
2440 
2441 		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
2442 			n |= 0x0010;
2443 
2444 		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
2445 			n |= 0x4000;
2446 
2447 		CSR_WRITE_2(sc, 12, n);
2448 	}
2449 
2450 	sc->xl_ldata_ptr = malloc(sizeof(struct xl_list_data) + 8,
2451 	    M_DEVBUF, M_NOWAIT);
2452 	if (sc->xl_ldata_ptr == NULL) {
2453 		printf("%s: no memory for list buffers\n",sc->sc_dev.dv_xname);
2454 		return;
2455 	}
2456 
2457 	sc->xl_ldata = (struct xl_list_data *)sc->xl_ldata_ptr;
2458 #ifdef __alpha__
2459 	round = (u_int64_t)sc->xl_ldata_ptr & 0xf;
2460 #else
2461 	round = (u_int32_t)sc->xl_ldata_ptr & 0xf;
2462 #endif
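	/*
	 * Bump the pointer up to the next 8-byte boundary; the extra
	 * 8 bytes allocated above leave room for this.
	 */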
2463 	roundptr = sc->xl_ldata_ptr;
2464 	for (i = 0; i < 8; i++) {
2465 		if (round % 8) {
2466 			round++;
2467 			roundptr++;
2468 		} else
2469 			break;
2470 	}
2471 	sc->xl_ldata = (struct xl_list_data *)roundptr;
2472 	bzero(sc->xl_ldata, sizeof(struct xl_list_data));
2473 
2474 	/*
2475 	 * Figure out the card type. 3c905B adapters have the
2476 	 * 'supportsNoTxLength' bit set in the capabilities
2477 	 * word in the EEPROM.
2478 	 */
2479 	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
2480 	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH)
2481 		sc->xl_type = XL_TYPE_905B;
2482 	else
2483 		sc->xl_type = XL_TYPE_90X;
2484 
2485 	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);
2486 
2487 	ifp->if_softc = sc;
2488 	ifp->if_mtu = ETHERMTU;
2489 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2490 	ifp->if_ioctl = xl_ioctl;
2491 	ifp->if_output = ether_output;
2492 	if (sc->xl_type == XL_TYPE_905B)
2493 		ifp->if_start = xl_start_90xB;
2494 	else
2495 		ifp->if_start = xl_start;
2496 	ifp->if_watchdog = xl_watchdog;
2497 	ifp->if_baudrate = 10000000;
2498 	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
2499 	IFQ_SET_READY(&ifp->if_snd);
2500 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2501 
2502 #if NVLAN > 0
2503 	if (sc->xl_type == XL_TYPE_905B)
2504 		ifp->if_capabilities = IFCAP_VLAN_MTU;
2505 	/*
2506 	 * XXX
2507 	 * Do other cards filter large packets or simply pass them through?
2508 	 * Apparently only the 905B has the capability to set a larger size.
2509 	 */
2510 #endif
2511 
2512 	XL_SEL_WIN(3);
2513 	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
2514 
2515 	xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0);
2516 	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
2517 	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
2518 
2519 	DELAY(100000);
2520 
2521 	xl_mediacheck(sc);
2522 
2523 	if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
2524 		XL_SEL_WIN(2);
2525 		CSR_WRITE_2(sc, 12, 0x4000 | CSR_READ_2(sc, 12));
2526 	}
2527 
2528 	DELAY(100000);
2529 
2530 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2531 	    || sc->xl_media & XL_MEDIAOPT_BT4) {
2532 		ifmedia_init(&sc->sc_mii.mii_media, 0,
2533 		    xl_ifmedia_upd, xl_ifmedia_sts);
2534 		sc->xl_hasmii = 1;
2535 		sc->sc_mii.mii_ifp = ifp;
2536 		sc->sc_mii.mii_readreg = xl_miibus_readreg;
2537 		sc->sc_mii.mii_writereg = xl_miibus_writereg;
2538 		sc->sc_mii.mii_statchg = xl_miibus_statchg;
2539 		xl_setcfg(sc);
2540 		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
2541 		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
2542 
2543 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2544 			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
2545 			    0, NULL);
2546 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2547 		}
2548 		else {
2549 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2550 		}
2551 		ifm = &sc->sc_mii.mii_media;
2552 	}
2553 	else {
2554 		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
2555 		sc->xl_hasmii = 0;
2556 		ifm = &sc->ifmedia;
2557 	}
2558 
2559 	/*
2560 	 * Sanity check. If the user has selected "auto" and this isn't
2561 	 * a 10/100 card of some kind, we need to force the transceiver
2562 	 * type to something sane.
2563 	 */
2564 	if (sc->xl_xcvr == XL_XCVR_AUTO) {
2565 		xl_choose_xcvr(sc, 0);
2566 		xl_reset(sc, 0);
2567 	}
2568 
2569 	if (sc->xl_media & XL_MEDIAOPT_BT) {
2570 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
2571 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2572 		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2573 			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2574 	}
2575 
2576 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
2577 		/*
2578 		 * Check for a 10baseFL board in disguise.
2579 		 */
2580 		if (sc->xl_type == XL_TYPE_905B &&
2581 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2582 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
2583 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
2584 			    0, NULL);
2585 			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2586 				ifmedia_add(ifm,
2587 				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
2588 		} else {
2589 			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
2590 		}
2591 	}
2592 
2593 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
2594 		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
2595 	}
2596 
2597 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
2598 		ifp->if_baudrate = 100000000;
2599 		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
2600 	}
2601 
2602 	/* Choose a default media. */
2603 	switch(sc->xl_xcvr) {
2604 	case XL_XCVR_10BT:
2605 		media = IFM_ETHER|IFM_10_T;
2606 		xl_setmode(sc, media);
2607 		break;
2608 	case XL_XCVR_AUI:
2609 		if (sc->xl_type == XL_TYPE_905B &&
2610 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2611 			media = IFM_ETHER|IFM_10_FL;
2612 			xl_setmode(sc, media);
2613 		} else {
2614 			media = IFM_ETHER|IFM_10_5;
2615 			xl_setmode(sc, media);
2616 		}
2617 		break;
2618 	case XL_XCVR_COAX:
2619 		media = IFM_ETHER|IFM_10_2;
2620 		xl_setmode(sc, media);
2621 		break;
2622 	case XL_XCVR_AUTO:
2623 	case XL_XCVR_100BTX:
2624 	case XL_XCVR_MII:
2625 		/* Chosen by miibus */
2626 		break;
2627 	case XL_XCVR_100BFX:
2628 		media = IFM_ETHER|IFM_100_FX;
2629 		xl_setmode(sc, media);
2630 		break;
2631 	default:
2632 		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
2633 							sc->xl_xcvr);
2634 		/*
2635 		 * This will probably be wrong, but it prevents
2636 		 * the ifmedia code from panicking.
2637 		 */
2638 		media = IFM_ETHER | IFM_10_T;
2639 		break;
2640 	}
2641 
2642 	if (sc->xl_hasmii == 0)
2643 		ifmedia_set(&sc->ifmedia, media);
2644 
2645 	/*
2646 	 * Call MI attach routines.
2647 	 */
2648 	if_attach(ifp);
2649 	ether_ifattach(ifp);
2650 
2651 	sc->sc_sdhook = shutdownhook_establish(xl_shutdown, sc);
2652 }
2653 
2654 int
2655 xl_detach(sc)
2656 	struct xl_softc *sc;
2657 {
2658 	struct ifnet *ifp = &sc->arpcom.ac_if;
2659 
2660 	/* Unhook our tick handler. */
2661 	timeout_del(&sc->xl_stsup_tmo);
2662 
2663 	xl_freetxrx(sc);
2664 
2665 	/* Detach all PHYs */
2666 	if (sc->xl_hasmii)
2667 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2668 
2669 	/* Delete all remaining media. */
2670 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2671 
2672 	ether_ifdetach(ifp);
2673 	if_detach(ifp);
2674 
2675 	shutdownhook_disestablish(sc->sc_sdhook);
2676 
2677 	return (0);
2678 }
2679 
2680 void
2681 xl_shutdown(v)
2682 	void *v;
2683 {
2684 	struct xl_softc	*sc = (struct xl_softc *)v;
2685 
2686 	xl_reset(sc, 1);
2687 	xl_stop(sc);
2688 }
2689 
2690 struct cfdriver xl_cd = {
2691 	0, "xl", DV_IFNET
2692 };
2693