xref: /openbsd-src/sys/dev/ic/xl.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
1 /*	$OpenBSD: xl.c,v 1.104 2011/07/14 16:38:27 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
57  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
58  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
59  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
60  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63  * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64  * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65  * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66  * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67  * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68  * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70  * Dell on-board 3c920 10/100Mbps/RJ-45
71  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72  * Dell Latitude laptop docking station embedded 3c905-TX
73  *
74  * Written by Bill Paul <wpaul@ctr.columbia.edu>
75  * Electrical Engineering Department
76  * Columbia University, New York City
77  */
78 
79 /*
 * The 3c90x series chips use a bus-master DMA interface for transferring
81  * packets to and from the controller chip. Some of the "vortex" cards
82  * (3c59x) also supported a bus master mode, however for those chips
83  * you could only DMA packets to/from a contiguous memory buffer. For
84  * transmission this would mean copying the contents of the queued mbuf
85  * chain into an mbuf cluster and then DMAing the cluster. This extra
86  * copy would sort of defeat the purpose of the bus master support for
87  * any packet that doesn't fit into a single mbuf.
88  *
89  * By contrast, the 3c90x cards support a fragment-based bus master
90  * mode where mbuf chains can be encapsulated using TX descriptors.
91  * This is similar to other PCI chips such as the Texas Instruments
92  * ThunderLAN and the Intel 82557/82558.
93  *
94  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95  * bus master chips because they maintain the old PIO interface for
96  * backwards compatibility, but starting with the 3c905B and the
97  * "cyclone" chips, the compatibility interface has been dropped.
98  * Since using bus master DMA is a big win, we use this driver to
99  * support the PCI "boomerang" chips even though they work with the
100  * "vortex" driver in order to obtain better performance.
101  */
102 
103 #include "bpfilter.h"
104 
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/mbuf.h>
108 #include <sys/protosw.h>
109 #include <sys/socket.h>
110 #include <sys/ioctl.h>
111 #include <sys/errno.h>
112 #include <sys/malloc.h>
113 #include <sys/kernel.h>
114 #include <sys/proc.h>   /* only for declaration of wakeup() used by vm.h */
115 #include <sys/device.h>
116 
117 #include <net/if.h>
118 #include <net/if_dl.h>
119 #include <net/if_types.h>
120 #include <net/if_media.h>
121 
122 #ifdef INET
123 #include <netinet/in.h>
124 #include <netinet/in_systm.h>
125 #include <netinet/in_var.h>
126 #include <netinet/ip.h>
127 #include <netinet/if_ether.h>
128 #endif
129 
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 
133 #include <machine/bus.h>
134 
135 #if NBPFILTER > 0
136 #include <net/bpf.h>
137 #endif
138 
139 #include <dev/ic/xlreg.h>
140 
141 /*
142  * TX Checksumming is disabled by default for two reasons:
143  * - TX Checksumming will occasionally produce corrupt packets
144  * - TX Checksumming seems to reduce performance
145  *
146  * Only 905B/C cards were reported to have this problem, it is possible
147  * that later chips _may_ be immune.
148  */
149 #define	XL905B_TXCSUM_BROKEN	1
150 
151 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
152 void xl_stats_update(void *);
153 int xl_encap(struct xl_softc *, struct xl_chain *,
154     struct mbuf * );
155 void xl_rxeof(struct xl_softc *);
156 void xl_txeof(struct xl_softc *);
157 void xl_txeof_90xB(struct xl_softc *);
158 void xl_txeoc(struct xl_softc *);
159 int xl_intr(void *);
160 void xl_start(struct ifnet *);
161 void xl_start_90xB(struct ifnet *);
162 int xl_ioctl(struct ifnet *, u_long, caddr_t);
163 void xl_freetxrx(struct xl_softc *);
164 void xl_watchdog(struct ifnet *);
165 int xl_ifmedia_upd(struct ifnet *);
166 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
167 
168 int xl_eeprom_wait(struct xl_softc *);
169 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
170 void xl_mii_sync(struct xl_softc *);
171 void xl_mii_send(struct xl_softc *, u_int32_t, int);
172 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
173 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
174 
175 void xl_setcfg(struct xl_softc *);
176 void xl_setmode(struct xl_softc *, int);
177 void xl_iff(struct xl_softc *);
178 void xl_iff_90x(struct xl_softc *);
179 void xl_iff_905b(struct xl_softc *);
180 int xl_list_rx_init(struct xl_softc *);
181 void xl_fill_rx_ring(struct xl_softc *);
182 int xl_list_tx_init(struct xl_softc *);
183 int xl_list_tx_init_90xB(struct xl_softc *);
184 void xl_wait(struct xl_softc *);
185 void xl_mediacheck(struct xl_softc *);
186 void xl_choose_xcvr(struct xl_softc *, int);
187 #ifdef notdef
188 void xl_testpacket(struct xl_softc *);
189 #endif
190 
191 int xl_miibus_readreg(struct device *, int, int);
192 void xl_miibus_writereg(struct device *, int, int, int);
193 void xl_miibus_statchg(struct device *);
194 #ifndef SMALL_KERNEL
195 int xl_wol(struct ifnet *, int);
196 void xl_wol_power(struct xl_softc *);
197 #endif
198 
/*
 * Power-management / autoconf activation hook.  Handles quiesce,
 * suspend and resume requests, stopping or restarting the chip as
 * needed and propagating the event to any child devices.
 * xl_wol_power() presumably arms wake-on-LAN power state -- it is
 * defined elsewhere in this file.
 */
int
xl_activate(struct device *self, int act)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
#ifndef SMALL_KERNEL
		xl_wol_power(sc);
#endif
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		/* Quiesce the chip before the machine sleeps. */
		if (ifp->if_flags & IFF_RUNNING) {
			xl_reset(sc);
			xl_stop(sc);
		}
#ifndef SMALL_KERNEL
		xl_wol_power(sc);
#endif
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		xl_reset(sc);
		rv = config_activate_children(self, act);
		/* Restart the interface only if it was configured up. */
		if (ifp->if_flags & IFF_UP)
			xl_init(sc);
		break;
	}
	return (rv);
}
232 
233 /*
234  * Murphy's law says that it's possible the chip can wedge and
235  * the 'command in progress' bit may never clear. Hence, we wait
236  * only a finite amount of time to avoid getting caught in an
237  * infinite loop. Normally this delay routine would be a macro,
238  * but it isn't called during normal operation so we can afford
239  * to make it a function.
240  */
241 void
242 xl_wait(struct xl_softc *sc)
243 {
244 	int	i;
245 
246 	for (i = 0; i < XL_TIMEOUT; i++) {
247 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
248 			break;
249 	}
250 
251 	if (i == XL_TIMEOUT)
252 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
253 }
254 
255 /*
256  * MII access routines are provided for adapters with external
257  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
258  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
259  * Note: if you don't perform the MDIO operations just right,
260  * it's possible to end up with code that works correctly with
261  * some chips/CPUs/processor speeds/bus speeds/etc but not
262  * with others.
263  */
/*
 * Set the given bit(s) in the window-4 PHY management register.
 * Both macros expect window 4 to be selected and an 'sc' softc
 * pointer to be in scope at the expansion site.
 */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

/* Clear the given bit(s) in the window-4 PHY management register. */
#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
271 
272 /*
273  * Sync the PHYs by setting data bit and strobing the clock 32 times.
274  */
void
xl_mii_sync(struct xl_softc *sc)
{
	int	i;

	XL_SEL_WIN(4);
	/* Drive MDIO high with the management bus in output mode. */
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * The repeated DATA writes look redundant, but each is
		 * a real register access; presumably they pad out the
		 * clock phases for timing -- confirm against the 3c90x
		 * reference before simplifying.
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}
292 
293 /*
294  * Clock a series of bits through the MII.
295  */
296 void
297 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
298 {
299 	int	i;
300 
301 	XL_SEL_WIN(4);
302 	MII_CLR(XL_MII_CLK);
303 
304 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
305                 if (bits & i) {
306 			MII_SET(XL_MII_DATA);
307                 } else {
308 			MII_CLR(XL_MII_DATA);
309                 }
310 		MII_CLR(XL_MII_CLK);
311 		MII_SET(XL_MII_CLK);
312 	}
313 }
314 
315 /*
 * Read a PHY register through the MII.
317  */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int	i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4.
	 */

	XL_SEL_WIN(4);

	/* Clear the PHY management register before bit-banging. */
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY drives MDIO low to acknowledge. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	/* Non-zero return means the PHY did not acknowledge the read. */
	if (ack)
		return (1);
	return (0);
}
399 
400 /*
401  * Write to a PHY register through the MII.
402  */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int	s;

	s = splnet();

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select register window 4.
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Clock out the whole write frame, MSB first per field. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	/* Writes are fire-and-forget; there is no ack to check. */
	return (0);
}
450 
451 int
452 xl_miibus_readreg(struct device *self, int phy, int reg)
453 {
454 	struct xl_softc *sc = (struct xl_softc *)self;
455 	struct xl_mii_frame	frame;
456 
457 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
458 		return (0);
459 
460 	bzero(&frame, sizeof(frame));
461 
462 	frame.mii_phyaddr = phy;
463 	frame.mii_regaddr = reg;
464 	xl_mii_readreg(sc, &frame);
465 
466 	return (frame.mii_data);
467 }
468 
469 void
470 xl_miibus_writereg(struct device *self, int phy, int reg, int data)
471 {
472 	struct xl_softc *sc = (struct xl_softc *)self;
473 	struct xl_mii_frame	frame;
474 
475 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
476 		return;
477 
478 	bzero(&frame, sizeof(frame));
479 
480 	frame.mii_phyaddr = phy;
481 	frame.mii_regaddr = reg;
482 	frame.mii_data = data;
483 
484 	xl_mii_writereg(sc, &frame);
485 }
486 
487 void
488 xl_miibus_statchg(struct device *self)
489 {
490 	struct xl_softc *sc = (struct xl_softc *)self;
491 
492 	xl_setcfg(sc);
493 
494 	/* Set ASIC's duplex mode to match the PHY. */
495 	XL_SEL_WIN(3);
496 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
497 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
498 	else
499 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
500 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
501 }
502 
503 /*
504  * The EEPROM is slow: give it time to come ready after issuing
505  * it a command.
506  */
507 int
508 xl_eeprom_wait(struct xl_softc *sc)
509 {
510 	int	i;
511 
512 	for (i = 0; i < 100; i++) {
513 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
514 			DELAY(162);
515 		else
516 			break;
517 	}
518 
519 	if (i == 100) {
520 		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
521 		return (1);
522 	}
523 
524 	return (0);
525 }
526 
527 /*
528  * Read a sequence of words from the EEPROM. Note that ethernet address
529  * data is stored in the EEPROM in network byte order.
530  */
int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int		err = 0, i;
	u_int16_t	word = 0, *ptr;
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some adapters keep their data at a fixed 0x30 word offset. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	/* One command/wait/read cycle per 16-bit word. */
	for (i = 0; i < cnt; i++) {
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		/* Station-address words are stored big-endian; 'swap'
		 * converts them to host order. */
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
570 
571 void
572 xl_iff(struct xl_softc *sc)
573 {
574 	if (sc->xl_type == XL_TYPE_905B)
575 		xl_iff_905b(sc);
576 	else
577 		xl_iff_90x(sc);
578 }
579 
580 /*
581  * NICs older than the 3c905B have only one multicast option, which
582  * is to enable reception of all multicast frames.
583  */
584 void
585 xl_iff_90x(struct xl_softc *sc)
586 {
587 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
588 	struct arpcom	*ac = &sc->sc_arpcom;
589 	u_int8_t	rxfilt;
590 
591 	XL_SEL_WIN(5);
592 
593 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
594 	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
595 	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
596 	ifp->if_flags &= ~IFF_ALLMULTI;
597 
598 	/*
599 	 * Always accept broadcast frames.
600 	 * Always accept frames destined to our station address.
601 	 */
602 	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
603 
604 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
605 	    ac->ac_multicnt > 0) {
606 		ifp->if_flags |= IFF_ALLMULTI;
607 		if (ifp->if_flags & IFF_PROMISC)
608 			rxfilt |= XL_RXFILTER_ALLFRAMES;
609 		else
610 			rxfilt |= XL_RXFILTER_ALLMULTI;
611 	}
612 
613 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
614 
615 	XL_SEL_WIN(7);
616 }
617 
618 /*
619  * 3c905B adapters have a hash filter that we can program.
620  */
void
xl_iff_905b(struct xl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct arpcom	*ac = &sc->sc_arpcom;
	int		h = 0, i;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t	rxfilt;

	XL_SEL_WIN(5);

	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/* Address ranges cannot be hashed, so fall back to all-multi. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		rxfilt |= XL_RXFILTER_MULTIHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash index: low byte of the big-endian CRC32. */
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0x000000FF;
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
			    XL_HASH_SET | h);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}
674 
#ifdef notdef
/*
 * Debug helper (compiled out): build a tiny self-addressed test
 * frame, queue it on the interface send queue and kick the
 * transmitter.
 */
void
xl_testpacket(struct xl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		error;

	ifp = &sc->sc_arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	bcopy(&sc->sc_arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->sc_arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	/* 3-byte payload, ether_type = 3 (802.3 length-style frame). */
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	xl_start(ifp);
}
#endif
703 
/*
 * Program the internal-config connector field to match the media
 * options (MII/BT4 use the MII transceiver, BTX the autoneg
 * transceiver) and stop the coax (BNC) transceiver.
 */
void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
		sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}
721 
/*
 * Force a transceiver/media selection for non-MII media: rewrite
 * the internal-config connector field, the window-4 media status
 * bits and the MAC duplex bit to match 'media', then start or stop
 * the coax transceiver as appropriate.
 */
void
xl_setmode(struct xl_softc *sc, int media)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t icfg;
	u_int16_t mediastat;

	XL_SEL_WIN(4);
	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (IFM_SUBTYPE(media) == IFM_10_T) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_10BT;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
			/* 10baseT: enable link beat and jabber guard. */
			mediastat |= XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
			ifp->if_baudrate = IF_Mbps(100);
			sc->xl_xcvr = XL_XCVR_100BFX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		if (IFM_SUBTYPE(media) == IFM_10_5) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD);
			/*
			 * NOTE(review): "|= ~XL_MEDIASTAT_SQEENB" sets every
			 * bit except SQEENB's -- almost certainly a typo
			 * (inherited from FreeBSD if_xl.c) for either
			 * "|= XL_MEDIASTAT_SQEENB" (enable SQE test on AUI)
			 * or "&= ~".  Confirm against the 3c90x reference
			 * before changing; hardware has tolerated it.
			 */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD);
			/* NOTE(review): same suspicious "|= ~" as the AUI
			 * case above. */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD|
					XL_MEDIASTAT_SQEENB);
		}
	}

	/* Full duplex is forced for 100baseFX as well as explicit FDX. */
	if ((media & IFM_GMASK) == IFM_FDX ||
			IFM_SUBTYPE(media) == IFM_100_FX) {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
	DELAY(800);
	XL_SEL_WIN(7);
}
810 
/*
 * Full chip reset: issue the global reset command, wait for it to
 * complete, reset the RX and TX engines, then reapply the
 * board-specific LED/MII power polarity options.
 */
void
xl_reset(struct xl_softc *sc)
{
	int	i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
		     XL_RESETOPT_DISADVFD:0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers. With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Some boards need inverted LED/MII power polarity. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
863 
864 /*
865  * This routine is a kludge to work around possible hardware faults
866  * or manufacturing defects that can cause the media options register
867  * (or reset options register, as it's called for the first generation
868  * 3c90x adapters) to return an incorrect result. I have encountered
869  * one Dell Latitude laptop docking station with an integrated 3c905-TX
870  * which doesn't have any of the 'mediaopt' bits set. This screws up
871  * the attach routine pretty badly because it doesn't know what media
872  * to look for. If we find ourselves in this predicament, this routine
873  * will try to guess the media options values and warn the user of a
874  * possible manufacturing defect with his adapter/system/whatever.
875  */
void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
	 	 * Check the XCVR value. If it's not in the normal range
	 	 * of values, we need to fake it up here.
	 	 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			printf("%s: bogus xcvr value "
			"in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
			printf("%s: choosing new default based "
				"on card type\n", sc->sc_dev.dv_xname);
		}
	} else {
		/* 905B boards with only 10baseFL set are legitimate. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		printf("%s: WARNING: no media options bits set in "
			"the media options register!!\n", sc->sc_dev.dv_xname);
		printf("%s: this could be a manufacturing defect in "
			"your adapter or system\n", sc->sc_dev.dv_xname);
		printf("%s: attempting to guess media type; you "
			"should probably consult your vendor\n", sc->sc_dev.dv_xname);
	}

	/* Guess media/transceiver from the PCI device ID, verbosely. */
	xl_choose_xcvr(sc, 1);
}
912 
/*
 * Pick default media options and transceiver type from the PCI
 * device ID stored in the EEPROM; used when the media options
 * register cannot be trusted (see xl_mediacheck()).  'verbose'
 * controls whether the guess is reported on the console.
 */
void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		printf("%s: unknown device ID: %x -- "
			"defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}
1003 
1004 /*
1005  * Initialize the transmit descriptors.
1006  */
1007 int
1008 xl_list_tx_init(struct xl_softc *sc)
1009 {
1010 	struct xl_chain_data	*cd;
1011 	struct xl_list_data	*ld;
1012 	int			i;
1013 
1014 	cd = &sc->xl_cdata;
1015 	ld = sc->xl_ldata;
1016 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1017 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1018 		if (i == (XL_TX_LIST_CNT - 1))
1019 			cd->xl_tx_chain[i].xl_next = NULL;
1020 		else
1021 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1022 	}
1023 
1024 	cd->xl_tx_free = &cd->xl_tx_chain[0];
1025 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1026 
1027 	return (0);
1028 }
1029 
1030 /*
1031  * Initialize the transmit descriptors.
1032  */
1033 int
1034 xl_list_tx_init_90xB(struct xl_softc *sc)
1035 {
1036 	struct xl_chain_data	*cd;
1037 	struct xl_list_data	*ld;
1038 	int			i, next, prev;
1039 
1040 	cd = &sc->xl_cdata;
1041 	ld = sc->xl_ldata;
1042 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1043 		if (i == (XL_TX_LIST_CNT - 1))
1044 			next = 0;
1045 		else
1046 			next = i + 1;
1047 		if (i == 0)
1048 			prev = XL_TX_LIST_CNT - 1;
1049 		else
1050 			prev = i - 1;
1051 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1052 		cd->xl_tx_chain[i].xl_phys =
1053 		    sc->sc_listmap->dm_segs[0].ds_addr +
1054 		    offsetof(struct xl_list_data, xl_tx_list[i]);
1055 		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1056 		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1057 	}
1058 
1059 	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1060 	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1061 
1062 	cd->xl_tx_prod = 1;
1063 	cd->xl_tx_cons = 1;
1064 	cd->xl_tx_cnt = 0;
1065 
1066 	return (0);
1067 }
1068 
1069 /*
1070  * Initialize the RX descriptors and allocate mbufs for them. Note that
1071  * we arrange the descriptors in a closed ring, so that the last descriptor
1072  * points back to the first.
1073  */
1074 int
1075 xl_list_rx_init(struct xl_softc *sc)
1076 {
1077 	struct xl_chain_data	*cd;
1078 	struct xl_list_data	*ld;
1079 	int			i, n;
1080 	bus_addr_t		next;
1081 
1082 	cd = &sc->xl_cdata;
1083 	ld = sc->xl_ldata;
1084 
1085 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1086 		cd->xl_rx_chain[i].xl_ptr =
1087 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1088 		if (i == (XL_RX_LIST_CNT - 1))
1089 			n = 0;
1090 		else
1091 			n = i + 1;
1092 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1093 		next = sc->sc_listmap->dm_segs[0].ds_addr +
1094 		       offsetof(struct xl_list_data, xl_rx_list[n]);
1095 		ld->xl_rx_list[i].xl_next = htole32(next);
1096 	}
1097 
1098 	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
1099 	cd->xl_rx_cnt = 0;
1100 	xl_fill_rx_ring(sc);
1101 	return (0);
1102 }
1103 
1104 void
1105 xl_fill_rx_ring(struct xl_softc *sc)
1106 {
1107 	struct xl_chain_data    *cd;
1108 	struct xl_list_data     *ld;
1109 
1110 	cd = &sc->xl_cdata;
1111 	ld = sc->xl_ldata;
1112 
1113 	while (cd->xl_rx_cnt < XL_RX_LIST_CNT) {
1114 		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1115 			break;
1116 		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1117 		cd->xl_rx_cnt++;
1118 	}
1119 }
1120 
1121 
1122 /*
1123  * Initialize an RX descriptor and attach an MBUF cluster.
1124  */
1125 int
1126 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1127 {
1128 	struct mbuf	*m_new = NULL;
1129 	bus_dmamap_t	map;
1130 
1131 	m_new = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
1132 
1133 	if (!m_new)
1134 		return (ENOBUFS);
1135 
1136 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1137 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1138 	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
1139 		m_freem(m_new);
1140 		return (ENOBUFS);
1141 	}
1142 
1143 	/* sync the old map, and unload it (if necessary) */
1144 	if (c->map->dm_nsegs != 0) {
1145 		bus_dmamap_sync(sc->sc_dmat, c->map,
1146 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1147 		bus_dmamap_unload(sc->sc_dmat, c->map);
1148 	}
1149 
1150 	map = c->map;
1151 	c->map = sc->sc_rx_sparemap;
1152 	sc->sc_rx_sparemap = map;
1153 
1154 	/* Force longword alignment for packet payload. */
1155 	m_adj(m_new, ETHER_ALIGN);
1156 
1157 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1158 	    BUS_DMASYNC_PREREAD);
1159 
1160 	c->xl_mbuf = m_new;
1161 	c->xl_ptr->xl_frag.xl_addr =
1162 	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
1163 	c->xl_ptr->xl_frag.xl_len =
1164 	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
1165 	c->xl_ptr->xl_status = htole32(0);
1166 
1167 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1168 	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
1169 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1170 
1171 	return (0);
1172 }
1173 
1174 
1175 /*
1176  * A frame has been uploaded: pass the resulting mbuf chain up to
1177  * the higher level protocols.
1178  */
1179 void
1180 xl_rxeof(struct xl_softc *sc)
1181 {
1182         struct mbuf		*m;
1183         struct ifnet		*ifp;
1184 	struct xl_chain_onefrag	*cur_rx;
1185 	int			total_len = 0;
1186 	u_int32_t		rxstat;
1187 	u_int16_t		sumflags = 0;
1188 
1189 	ifp = &sc->sc_arpcom.ac_if;
1190 
1191 again:
1192 
1193 	while (sc->xl_cdata.xl_rx_cnt > 0) {
1194 		cur_rx = sc->xl_cdata.xl_rx_cons;
1195 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1196 		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1197 		    sizeof(struct xl_list),
1198 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1199 		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
1200 			break;
1201 		m = cur_rx->xl_mbuf;
1202 		cur_rx->xl_mbuf = NULL;
1203 		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
1204 		sc->xl_cdata.xl_rx_cnt--;
1205 		total_len = rxstat & XL_RXSTAT_LENMASK;
1206 
1207 		/*
1208 		 * Since we have told the chip to allow large frames,
1209 		 * we need to trap giant frame errors in software. We allow
1210 		 * a little more than the normal frame size to account for
1211 		 * frames with VLAN tags.
1212 		 */
1213 		if (total_len > XL_MAX_FRAMELEN)
1214 			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1215 
1216 		/*
1217 		 * If an error occurs, update stats, clear the
1218 		 * status word and leave the mbuf cluster in place:
1219 		 * it should simply get re-used next time this descriptor
1220 	 	 * comes up in the ring.
1221 		 */
1222 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1223 			ifp->if_ierrors++;
1224 			cur_rx->xl_ptr->xl_status = htole32(0);
1225 			m_freem(m);
1226 			continue;
1227 		}
1228 
1229 		/*
1230 		 * If the error bit was not set, the upload complete
1231 		 * bit should be set which means we have a valid packet.
1232 		 * If not, something truly strange has happened.
1233 		 */
1234 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1235 			printf("%s: bad receive status -- "
1236 			    "packet dropped\n", sc->sc_dev.dv_xname);
1237 			ifp->if_ierrors++;
1238 			cur_rx->xl_ptr->xl_status = htole32(0);
1239 			m_freem(m);
1240 			continue;
1241 		}
1242 
1243 		ifp->if_ipackets++;
1244 		m->m_pkthdr.rcvif = ifp;
1245 		m->m_pkthdr.len = m->m_len = total_len;
1246 #if NBPFILTER > 0
1247 		/*
1248 		 * Handle BPF listeners. Let the BPF user see the packet.
1249 		 */
1250 		if (ifp->if_bpf) {
1251 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1252 		}
1253 #endif
1254 
1255 		if (sc->xl_type == XL_TYPE_905B) {
1256 			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1257 			    (rxstat & XL_RXSTAT_IPCKOK))
1258 				sumflags |= M_IPV4_CSUM_IN_OK;
1259 
1260 			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1261 			    (rxstat & XL_RXSTAT_TCPCKOK))
1262 				sumflags |= M_TCP_CSUM_IN_OK;
1263 
1264 			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1265 			    (rxstat & XL_RXSTAT_UDPCKOK))
1266 				sumflags |= M_UDP_CSUM_IN_OK;
1267 
1268 			m->m_pkthdr.csum_flags = sumflags;
1269 		}
1270 
1271 		ether_input_mbuf(ifp, m);
1272 	}
1273 	xl_fill_rx_ring(sc);
1274 	/*
1275 	 * Handle the 'end of channel' condition. When the upload
1276 	 * engine hits the end of the RX ring, it will stall. This
1277 	 * is our cue to flush the RX ring, reload the uplist pointer
1278 	 * register and unstall the engine.
1279 	 * XXX This is actually a little goofy. With the ThunderLAN
1280 	 * chip, you get an interrupt when the receiver hits the end
1281 	 * of the receive ring, which tells you exactly when you
1282 	 * you need to reload the ring pointer. Here we have to
1283 	 * fake it. I'm mad at myself for not being clever enough
1284 	 * to avoid the use of a goto here.
1285 	 */
1286 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1287 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1288 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1289 		xl_wait(sc);
1290 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1291 		xl_fill_rx_ring(sc);
1292 		goto again;
1293 	}
1294 
1295 }
1296 
1297 /*
1298  * A frame was downloaded to the chip. It's safe for us to clean up
1299  * the list buffers.
1300  */
1301 void
1302 xl_txeof(struct xl_softc *sc)
1303 {
1304 	struct xl_chain		*cur_tx;
1305 	struct ifnet		*ifp;
1306 
1307 	ifp = &sc->sc_arpcom.ac_if;
1308 
1309 	/*
1310 	 * Go through our tx list and free mbufs for those
1311 	 * frames that have been uploaded. Note: the 3c905B
1312 	 * sets a special bit in the status word to let us
1313 	 * know that a frame has been downloaded, but the
1314 	 * original 3c900/3c905 adapters don't do that.
1315 	 * Consequently, we have to use a different test if
1316 	 * xl_type != XL_TYPE_905B.
1317 	 */
1318 	while (sc->xl_cdata.xl_tx_head != NULL) {
1319 		cur_tx = sc->xl_cdata.xl_tx_head;
1320 
1321 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1322 		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
1323 		    sizeof(struct xl_list),
1324 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1325 
1326 		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1327 			break;
1328 
1329 		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1330 		ifp->if_opackets++;
1331 		if (cur_tx->map->dm_nsegs != 0) {
1332 			bus_dmamap_t map = cur_tx->map;
1333 
1334 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1335 			    BUS_DMASYNC_POSTWRITE);
1336 			bus_dmamap_unload(sc->sc_dmat, map);
1337 		}
1338 		if (cur_tx->xl_mbuf != NULL) {
1339 			m_freem(cur_tx->xl_mbuf);
1340 			cur_tx->xl_mbuf = NULL;
1341 		}
1342 		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1343 		sc->xl_cdata.xl_tx_free = cur_tx;
1344 	}
1345 
1346 	if (sc->xl_cdata.xl_tx_head == NULL) {
1347 		ifp->if_flags &= ~IFF_OACTIVE;
1348 		/* Clear the timeout timer. */
1349 		ifp->if_timer = 0;
1350 		sc->xl_cdata.xl_tx_tail = NULL;
1351 	} else {
1352 		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1353 			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1354 			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1355 			    sc->sc_listmap->dm_segs[0].ds_addr +
1356 			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1357 			    sc->sc_listkva));
1358 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1359 		}
1360 	}
1361 }
1362 
/*
 * TX completion for 3c90xB chips: walk the ring from the consumer
 * index toward the producer, reclaiming every descriptor whose
 * DL_COMPLETE status bit the chip has set, freeing mbufs and
 * unloading DMA maps as we go.
 */
void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->sc_arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if ((cur_tx->xl_ptr->xl_status &
		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
			break;

		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* Reclaimed at least one slot: transmit may proceed again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* Nothing outstanding: cancel the watchdog. */
	if (sc->xl_cdata.xl_tx_cnt == 0)
		ifp->if_timer = 0;
}
1405 
1406 /*
1407  * TX 'end of channel' interrupt handler. Actually, we should
1408  * only get a 'TX complete' interrupt if there's a transmit error,
1409  * so this is really TX error handler.
1410  */
1411 void
1412 xl_txeoc(struct xl_softc *sc)
1413 {
1414 	u_int8_t	txstat;
1415 
1416 	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1417 		if (txstat & XL_TXSTATUS_UNDERRUN ||
1418 			txstat & XL_TXSTATUS_JABBER ||
1419 			txstat & XL_TXSTATUS_RECLAIM) {
1420 			if (txstat != 0x90) {
1421 				printf("%s: transmission error: %x\n",
1422 				    sc->sc_dev.dv_xname, txstat);
1423 			}
1424 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1425 			xl_wait(sc);
1426 			if (sc->xl_type == XL_TYPE_905B) {
1427 				if (sc->xl_cdata.xl_tx_cnt) {
1428 					int i;
1429 					struct xl_chain *c;
1430 
1431 					i = sc->xl_cdata.xl_tx_cons;
1432 					c = &sc->xl_cdata.xl_tx_chain[i];
1433 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1434 					    c->xl_phys);
1435 					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1436 				}
1437 			} else {
1438 				if (sc->xl_cdata.xl_tx_head != NULL)
1439 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1440 					    sc->sc_listmap->dm_segs[0].ds_addr +
1441 					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1442 					    sc->sc_listkva));
1443 			}
1444 			/*
1445 			 * Remember to set this for the
1446 			 * first generation 3c90X chips.
1447 			 */
1448 			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1449 			if (txstat & XL_TXSTATUS_UNDERRUN &&
1450 			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
1451 				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1452 #ifdef notdef
1453 				printf("%s: tx underrun, increasing tx start"
1454 				    " threshold to %d\n", sc->sc_dev.dv_xname,
1455 				    sc->xl_tx_thresh);
1456 #endif
1457 			}
1458 			CSR_WRITE_2(sc, XL_COMMAND,
1459 			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1460 			if (sc->xl_type == XL_TYPE_905B) {
1461 				CSR_WRITE_2(sc, XL_COMMAND,
1462 				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1463 			}
1464 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1465 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1466 		} else {
1467 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1468 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1469 		}
1470 		/*
1471 		 * Write an arbitrary byte to the TX_STATUS register
1472 	 	 * to clear this interrupt/error and advance to the next.
1473 		 */
1474 		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1475 	}
1476 }
1477 
/*
 * Interrupt handler. Acknowledges and dispatches each pending
 * interrupt cause in turn, then kicks the transmit path if frames
 * are queued. Returns non-zero if the interrupt was ours.
 */
int
xl_intr(void *arg)
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	/* 0xFFFF reads back when the device is absent (e.g. ejected). */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		claimed = 1;

		/* Ack exactly the causes we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		/* Optional bus-specific ack hook (e.g. CardBus). */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (status & XL_STAT_UP_COMPLETE)
			xl_rxeof(sc);


		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete fires on transmit errors; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Host (adapter) failure: full reset and reinit. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		/* Drain the stats counters before they overflow. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}
1532 
/*
 * Read and clear the hardware statistics counters, folding them into
 * the interface counters. Runs from the xl_stsup_tmo timeout once a
 * second (rescheduling itself), and also directly on a stats-overflow
 * interrupt with xl_stats_no_timeout set to suppress rescheduling and
 * mii_tick().
 */
void
xl_stats_update(void *xsc)
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	struct xl_stats		xl_stats;
	u_int8_t		*p;
	int			i;
	struct mii_data		*mii = NULL;

	bzero(&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/*
	 * The 16 window-6 counter registers are read byte-by-byte
	 * into xl_stats; reading also clears them in hardware.
	 */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
				xl_stats.xl_tx_single_collision +
				xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Return to window 7, the normal operating window. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
1581 
1582 /*
1583  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1584  * pointers to the fragment pointers.
1585  */
1586 int
1587 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
1588 {
1589 	int		error, frag, total_len;
1590 	u_int32_t	status;
1591 	bus_dmamap_t	map;
1592 
1593 	map = sc->sc_tx_sparemap;
1594 
1595 reload:
1596 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1597 	    m_head, BUS_DMA_NOWAIT);
1598 
1599 	if (error && error != EFBIG) {
1600 		m_freem(m_head);
1601 		return (1);
1602 	}
1603 
1604 	/*
1605  	 * Start packing the mbufs in this chain into
1606 	 * the fragment pointers. Stop when we run out
1607  	 * of fragments or hit the end of the mbuf chain.
1608 	 */
1609 	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
1610 		if (frag == XL_MAXFRAGS)
1611 			break;
1612 		total_len += map->dm_segs[frag].ds_len;
1613 		c->xl_ptr->xl_frag[frag].xl_addr =
1614 		    htole32(map->dm_segs[frag].ds_addr);
1615 		c->xl_ptr->xl_frag[frag].xl_len =
1616 		    htole32(map->dm_segs[frag].ds_len);
1617 	}
1618 
1619 	/*
1620 	 * Handle special case: we used up all 63 fragments,
1621 	 * but we have more mbufs left in the chain. Copy the
1622 	 * data into an mbuf cluster. Note that we don't
1623 	 * bother clearing the values in the other fragment
1624 	 * pointers/counters; it wouldn't gain us anything,
1625 	 * and would waste cycles.
1626 	 */
1627 	if (error) {
1628 		struct mbuf	*m_new = NULL;
1629 
1630 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1631 		if (m_new == NULL) {
1632 			m_freem(m_head);
1633 			return (1);
1634 		}
1635 		if (m_head->m_pkthdr.len > MHLEN) {
1636 			MCLGET(m_new, M_DONTWAIT);
1637 			if (!(m_new->m_flags & M_EXT)) {
1638 				m_freem(m_new);
1639 				m_freem(m_head);
1640 				return (1);
1641 			}
1642 		}
1643 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1644 		    mtod(m_new, caddr_t));
1645 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1646 		m_freem(m_head);
1647 		m_head = m_new;
1648 		goto reload;
1649 	}
1650 
1651 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1652 	    BUS_DMASYNC_PREWRITE);
1653 
1654 	if (c->map->dm_nsegs != 0) {
1655 		bus_dmamap_sync(sc->sc_dmat, c->map,
1656 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1657 		bus_dmamap_unload(sc->sc_dmat, c->map);
1658 	}
1659 
1660 	c->xl_mbuf = m_head;
1661 	sc->sc_tx_sparemap = c->map;
1662 	c->map = map;
1663 	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
1664 	c->xl_ptr->xl_status = htole32(total_len);
1665 	c->xl_ptr->xl_next = 0;
1666 
1667 	if (sc->xl_type == XL_TYPE_905B) {
1668 		status = XL_TXSTAT_RND_DEFEAT;
1669 
1670 #ifndef XL905B_TXCSUM_BROKEN
1671 		if (m_head->m_pkthdr.csum_flags) {
1672 			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1673 				status |= XL_TXSTAT_IPCKSUM;
1674 			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1675 				status |= XL_TXSTAT_TCPCKSUM;
1676 			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1677 				status |= XL_TXSTAT_UDPCKSUM;
1678 		}
1679 #endif
1680 		c->xl_ptr->xl_status = htole32(status);
1681 	}
1682 
1683 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1684 	    offsetof(struct xl_list_data, xl_tx_list[0]),
1685 	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
1686 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1687 
1688 	return (0);
1689 }
1690 
1691 /*
1692  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1693  * to the mbuf data regions directly in the transmit lists. We also save a
1694  * copy of the pointers since the transmit list fragment pointers are
1695  * physical addresses.
1696  */
1697 void
1698 xl_start(struct ifnet *ifp)
1699 {
1700 	struct xl_softc		*sc;
1701 	struct mbuf		*m_head = NULL;
1702 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1703 	struct xl_chain		*prev_tx;
1704 	int			error;
1705 
1706 	sc = ifp->if_softc;
1707 
1708 	/*
1709 	 * Check for an available queue slot. If there are none,
1710 	 * punt.
1711 	 */
1712 	if (sc->xl_cdata.xl_tx_free == NULL) {
1713 		xl_txeoc(sc);
1714 		xl_txeof(sc);
1715 		if (sc->xl_cdata.xl_tx_free == NULL) {
1716 			ifp->if_flags |= IFF_OACTIVE;
1717 			return;
1718 		}
1719 	}
1720 
1721 	start_tx = sc->xl_cdata.xl_tx_free;
1722 
1723 	while (sc->xl_cdata.xl_tx_free != NULL) {
1724 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1725 		if (m_head == NULL)
1726 			break;
1727 
1728 		/* Pick a descriptor off the free list. */
1729 		prev_tx = cur_tx;
1730 		cur_tx = sc->xl_cdata.xl_tx_free;
1731 
1732 		/* Pack the data into the descriptor. */
1733 		error = xl_encap(sc, cur_tx, m_head);
1734 		if (error) {
1735 			cur_tx = prev_tx;
1736 			continue;
1737 		}
1738 
1739 		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1740 		cur_tx->xl_next = NULL;
1741 
1742 		/* Chain it together. */
1743 		if (prev != NULL) {
1744 			prev->xl_next = cur_tx;
1745 			prev->xl_ptr->xl_next =
1746 			    sc->sc_listmap->dm_segs[0].ds_addr +
1747 			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
1748 
1749 		}
1750 		prev = cur_tx;
1751 
1752 #if NBPFILTER > 0
1753 		/*
1754 		 * If there's a BPF listener, bounce a copy of this frame
1755 		 * to him.
1756 		 */
1757 		if (ifp->if_bpf)
1758 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1759 			    BPF_DIRECTION_OUT);
1760 #endif
1761 	}
1762 
1763 	/*
1764 	 * If there are no packets queued, bail.
1765 	 */
1766 	if (cur_tx == NULL)
1767 		return;
1768 
1769 	/*
1770 	 * Place the request for the upload interrupt
1771 	 * in the last descriptor in the chain. This way, if
1772 	 * we're chaining several packets at once, we'll only
1773 	 * get an interrupt once for the whole chain rather than
1774 	 * once for each packet.
1775 	 */
1776 	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1777 
1778 	/*
1779 	 * Queue the packets. If the TX channel is clear, update
1780 	 * the downlist pointer register.
1781 	 */
1782 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1783 	xl_wait(sc);
1784 
1785 	if (sc->xl_cdata.xl_tx_head != NULL) {
1786 		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1787 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1788 		    sc->sc_listmap->dm_segs[0].ds_addr +
1789 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
1790 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1791 		    htole32(~XL_TXSTAT_DL_INTR);
1792 		sc->xl_cdata.xl_tx_tail = cur_tx;
1793 	} else {
1794 		sc->xl_cdata.xl_tx_head = start_tx;
1795 		sc->xl_cdata.xl_tx_tail = cur_tx;
1796 	}
1797 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1798 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1799 		    sc->sc_listmap->dm_segs[0].ds_addr +
1800 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));
1801 
1802 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1803 
1804 	XL_SEL_WIN(7);
1805 
1806 	/*
1807 	 * Set a timeout in case the chip goes out to lunch.
1808 	 */
1809 	ifp->if_timer = 5;
1810 
1811 	/*
1812 	 * XXX Under certain conditions, usually on slower machines
1813 	 * where interrupts may be dropped, it's possible for the
1814 	 * adapter to chew up all the buffers in the receive ring
1815 	 * and stall, without us being able to do anything about it.
1816 	 * To guard against this, we need to make a pass over the
1817 	 * RX queue to make sure there aren't any packets pending.
1818 	 * Doing it here means we can flush the receive ring at the
1819 	 * same time the chip is DMAing the transmit descriptors we
1820 	 * just gave it.
1821  	 *
1822 	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1823 	 * nature of their chips in all their marketing literature;
1824 	 * we may as well take advantage of it. :)
1825 	 */
1826 	xl_rxeof(sc);
1827 }
1828 
/*
 * Transmit routine for 3c90xB chips. These use a fixed descriptor
 * ring with producer/consumer indices and the chip's download-poll
 * mechanism, so starting transmission only requires linking the new
 * chain into the ring (no stall/unstall dance as in xl_start()).
 */
void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc	*sc;
	struct mbuf	*m_head = NULL;
	struct xl_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain	*prev_tx;
	int		error, idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a small reserve of free ring slots. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; reuse this ring slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/*
	 * Linking the previous descriptor to the new chain makes it
	 * visible to the chip's download poller.
	 */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1910 
/*
 * Initialize the hardware and start the interface: stop pending I/O,
 * program the MAC address, set up the RX/TX descriptor rings, program
 * thresholds and filters, enable interrupts, and start the receiver
 * and transmitter. Runs at splnet.
 */
void
xl_init(void *xsc)
{
	struct xl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			s, i;
	struct mii_data		*mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	/* Without MII, an RX reset here won't lose PHY state. */
	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
				sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Program promiscuous mode and multicast filters. */
	xl_iff(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
 	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	/* Optional bus-specific ack hook (e.g. CardBus). */
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Kick off the periodic stats/MII tick. */
	timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
2096 
2097 /*
2098  * Set media options.
2099  */
2100 int
2101 xl_ifmedia_upd(struct ifnet *ifp)
2102 {
2103 	struct xl_softc		*sc;
2104 	struct ifmedia		*ifm = NULL;
2105 	struct mii_data		*mii = NULL;
2106 
2107 	sc = ifp->if_softc;
2108 
2109 	if (sc->xl_hasmii)
2110 		mii = &sc->sc_mii;
2111 	if (mii == NULL)
2112 		ifm = &sc->ifmedia;
2113 	else
2114 		ifm = &mii->mii_media;
2115 
2116 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2117 	case IFM_100_FX:
2118 	case IFM_10_FL:
2119 	case IFM_10_2:
2120 	case IFM_10_5:
2121 		xl_setmode(sc, ifm->ifm_media);
2122 		return (0);
2123 		break;
2124 	default:
2125 		break;
2126 	}
2127 
2128 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2129 		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2130 		xl_init(sc);
2131 	} else {
2132 		xl_setmode(sc, ifm->ifm_media);
2133 	}
2134 
2135 	return (0);
2136 }
2137 
2138 /*
2139  * Report current media status.
2140  */
2141 void
2142 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2143 {
2144 	struct xl_softc		*sc;
2145 	u_int32_t		icfg;
2146 	u_int16_t		status = 0;
2147 	struct mii_data		*mii = NULL;
2148 
2149 	sc = ifp->if_softc;
2150 	if (sc->xl_hasmii != 0)
2151 		mii = &sc->sc_mii;
2152 
2153 	XL_SEL_WIN(4);
2154 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2155 
2156 	XL_SEL_WIN(3);
2157 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2158 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2159 
2160 	ifmr->ifm_active = IFM_ETHER;
2161 	ifmr->ifm_status = IFM_AVALID;
2162 
2163 	if ((status & XL_MEDIASTAT_CARRIER) == 0)
2164 		ifmr->ifm_status |= IFM_ACTIVE;
2165 
2166 	switch(icfg) {
2167 	case XL_XCVR_10BT:
2168 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2169 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2170 			ifmr->ifm_active |= IFM_FDX;
2171 		else
2172 			ifmr->ifm_active |= IFM_HDX;
2173 		break;
2174 	case XL_XCVR_AUI:
2175 		if (sc->xl_type == XL_TYPE_905B &&
2176 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2177 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2178 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2179 				ifmr->ifm_active |= IFM_FDX;
2180 			else
2181 				ifmr->ifm_active |= IFM_HDX;
2182 		} else
2183 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2184 		break;
2185 	case XL_XCVR_COAX:
2186 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2187 		break;
2188 	/*
2189 	 * XXX MII and BTX/AUTO should be separate cases.
2190 	 */
2191 
2192 	case XL_XCVR_100BTX:
2193 	case XL_XCVR_AUTO:
2194 	case XL_XCVR_MII:
2195 		if (mii != NULL) {
2196 			mii_pollstat(mii);
2197 			ifmr->ifm_active = mii->mii_media_active;
2198 			ifmr->ifm_status = mii->mii_media_status;
2199 		}
2200 		break;
2201 	case XL_XCVR_100BFX:
2202 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2203 		break;
2204 	default:
2205 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2206 		break;
2207 	}
2208 }
2209 
2210 int
2211 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2212 {
2213 	struct xl_softc *sc = ifp->if_softc;
2214 	struct ifreq *ifr = (struct ifreq *)data;
2215 	struct ifaddr *ifa = (struct ifaddr *)data;
2216 	int s, error = 0;
2217 	struct mii_data *mii = NULL;
2218 
2219 	s = splnet();
2220 
2221 	switch(command) {
2222 	case SIOCSIFADDR:
2223 		ifp->if_flags |= IFF_UP;
2224 		if (!(ifp->if_flags & IFF_RUNNING))
2225 			xl_init(sc);
2226 #ifdef INET
2227 		if (ifa->ifa_addr->sa_family == AF_INET)
2228 			arp_ifinit(&sc->sc_arpcom, ifa);
2229 #endif
2230 		break;
2231 
2232 	case SIOCSIFFLAGS:
2233 		if (ifp->if_flags & IFF_UP) {
2234 			if (ifp->if_flags & IFF_RUNNING)
2235 				error = ENETRESET;
2236 			else
2237 				xl_init(sc);
2238 		} else {
2239 			if (ifp->if_flags & IFF_RUNNING)
2240 				xl_stop(sc);
2241 		}
2242 		break;
2243 
2244 	case SIOCGIFMEDIA:
2245 	case SIOCSIFMEDIA:
2246 		if (sc->xl_hasmii != 0)
2247 			mii = &sc->sc_mii;
2248 		if (mii == NULL)
2249 			error = ifmedia_ioctl(ifp, ifr,
2250 			    &sc->ifmedia, command);
2251 		else
2252 			error = ifmedia_ioctl(ifp, ifr,
2253 			    &mii->mii_media, command);
2254 		break;
2255 
2256 	default:
2257 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2258 	}
2259 
2260 	if (error == ENETRESET) {
2261 		if (ifp->if_flags & IFF_RUNNING)
2262 			xl_iff(sc);
2263 		error = 0;
2264 	}
2265 
2266 	splx(s);
2267 	return (error);
2268 }
2269 
2270 void
2271 xl_watchdog(struct ifnet *ifp)
2272 {
2273 	struct xl_softc		*sc;
2274 	u_int16_t		status = 0;
2275 
2276 	sc = ifp->if_softc;
2277 
2278 	ifp->if_oerrors++;
2279 	XL_SEL_WIN(4);
2280 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2281 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2282 
2283 	if (status & XL_MEDIASTAT_CARRIER)
2284 		printf("%s: no carrier - transceiver cable problem?\n",
2285 								sc->sc_dev.dv_xname);
2286 	xl_txeoc(sc);
2287 	xl_txeof(sc);
2288 	xl_rxeof(sc);
2289 	xl_reset(sc);
2290 	xl_init(sc);
2291 
2292 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2293 		(*ifp->if_start)(ifp);
2294 }
2295 
2296 void
2297 xl_freetxrx(struct xl_softc *sc)
2298 {
2299 	bus_dmamap_t	map;
2300 	int		i;
2301 
2302 	/*
2303 	 * Free data in the RX lists.
2304 	 */
2305 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2306 		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2307 			map = sc->xl_cdata.xl_rx_chain[i].map;
2308 
2309 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2310 			    BUS_DMASYNC_POSTREAD);
2311 			bus_dmamap_unload(sc->sc_dmat, map);
2312 		}
2313 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2314 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2315 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2316 		}
2317 	}
2318 	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2319 	/*
2320 	 * Free the TX list buffers.
2321 	 */
2322 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2323 		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2324 			map = sc->xl_cdata.xl_tx_chain[i].map;
2325 
2326 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2327 			    BUS_DMASYNC_POSTWRITE);
2328 			bus_dmamap_unload(sc->sc_dmat, map);
2329 		}
2330 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2331 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2332 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2333 		}
2334 	}
2335 	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2336 }
2337 
2338 /*
2339  * Stop the adapter and free any mbufs allocated to the
2340  * RX and TX lists.
2341  */
2342 void
2343 xl_stop(struct xl_softc *sc)
2344 {
2345 	struct ifnet *ifp;
2346 
2347 	/* Stop the stats updater. */
2348 	timeout_del(&sc->xl_stsup_tmo);
2349 
2350 	ifp = &sc->sc_arpcom.ac_if;
2351 	ifp->if_timer = 0;
2352 
2353 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2354 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2355 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2356 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2357 	xl_wait(sc);
2358 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2359 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2360 	DELAY(800);
2361 
2362 #ifdef foo
2363 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2364 	xl_wait(sc);
2365 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2366 	xl_wait(sc);
2367 #endif
2368 
2369 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2370 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2371 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2372 
2373 	if (sc->intr_ack)
2374 		(*sc->intr_ack)(sc);
2375 
2376 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2377 
2378 	xl_freetxrx(sc);
2379 }
2380 
2381 #ifndef SMALL_KERNEL
2382 void
2383 xl_wol_power(struct xl_softc *sc)
2384 {
2385 	/* Re-enable RX and call upper layer WOL power routine
2386 	 * if WOL is enabled. */
2387 	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
2388 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2389 		sc->wol_power(sc->wol_power_arg);
2390 	}
2391 }
2392 #endif
2393 
/*
 * Bus-independent attach.  Reads the station address from the EEPROM,
 * allocates and maps the descriptor-list DMA memory and per-buffer DMA
 * maps, identifies the chip generation, probes the available media and
 * registers the network interface.  Called by the PCI/CardBus glue.
 */
void
xl_attach(struct xl_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	u_int16_t		xcvr[2];
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	i = splnet();
	xl_reset(sc);
	splx(i);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("\n%s: failed to read station address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	bcopy(enaddr, &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Allocate, map and load one contiguous chunk of DMA-able memory
	 * for the RX and TX descriptor lists (struct xl_list_data).
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct xl_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;

	/* One single-segment map per RX descriptor, plus a spare. */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	/* TX maps may scatter across up to XL_TX_LIST_CNT - 3 segments. */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Some boards need their LED/MII power polarity inverted via
	 * magic bits in window 2, register 12.
	 */
	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
		u_int16_t n;

		XL_SEL_WIN(2);
		n = CSR_READ_2(sc, 12);

		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
			n |= 0x0010;

		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
			n |= 0x4000;

		CSR_WRITE_2(sc, 12, n);
	}

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;

	/* Periodic statistics update (rearmed from xl_init/xl_stats_update). */
	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	/* 90xB chips use a different (download-polling) start routine. */
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef XL905B_TXCSUM_BROKEN
	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
				IFCAP_CSUM_UDPv4;
#endif

	/* Read the supported-media bitmask from window 3. */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);

	/* Extract the default connector from the EEPROM internal config. */
	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	/*
	 * MII-capable media (MII, 100baseTX, 100baseT4) go through
	 * miibus; everything else uses the driver-private ifmedia.
	 */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		ifmedia_init(&sc->sc_mii.mii_media, 0,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 1;
		sc->sc_mii.mii_ifp = ifp;
		sc->sc_mii.mii_readreg = xl_miibus_readreg;
		sc->sc_mii.mii_writereg = xl_miibus_writereg;
		sc->sc_mii.mii_statchg = xl_miibus_statchg;
		xl_setcfg(sc);
		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			/* No PHY found: fall back to a "none" media entry. */
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		}
		else {
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
		ifm = &sc->sc_mii.mii_media;
	}
	else {
		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 0;
		ifm = &sc->ifmedia;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO) {
		xl_choose_xcvr(sc, 0);
		i = splnet();
		xl_reset(sc);
		splx(i);
	}

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		ifp->if_baudrate = 100000000;
		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		xl_setmode(sc, media);
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
							sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER | IFM_10_T;
		break;
	}

	if (sc->xl_hasmii == 0)
		ifmedia_set(&sc->ifmedia, media);

	/* Some boards need the transceiver powered via a magic write. */
	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

#ifndef SMALL_KERNEL
	/* Check availability of WOL. */
	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_wol = xl_wol;
		xl_wol(ifp, 0);
	}
#endif

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	m_clsetwms(ifp, MCLBYTES, 2, XL_RX_LIST_CNT - 1);
}
2677 
2678 int
2679 xl_detach(struct xl_softc *sc)
2680 {
2681 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2682 	extern void xl_freetxrx(struct xl_softc *);
2683 
2684 	/* Unhook our tick handler. */
2685 	timeout_del(&sc->xl_stsup_tmo);
2686 
2687 	xl_freetxrx(sc);
2688 
2689 	/* Detach all PHYs */
2690 	if (sc->xl_hasmii)
2691 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2692 
2693 	/* Delete all remaining media. */
2694 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2695 
2696 	ether_ifdetach(ifp);
2697 	if_detach(ifp);
2698 
2699 	return (0);
2700 }
2701 
2702 #ifndef SMALL_KERNEL
2703 int
2704 xl_wol(struct ifnet *ifp, int enable)
2705 {
2706 	struct xl_softc		*sc = ifp->if_softc;
2707 
2708 	XL_SEL_WIN(7);
2709 	if (enable) {
2710 		if (!(ifp->if_flags & IFF_RUNNING))
2711 			xl_init(sc);
2712 		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2713 		sc->xl_flags |= XL_FLAG_WOL;
2714 	} else {
2715 		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2716 		sc->xl_flags &= ~XL_FLAG_WOL;
2717 	}
2718 	return (0);
2719 }
2720 #endif
2721 
/* Autoconf glue: driver descriptor for xl(4) network interfaces. */
struct cfdriver xl_cd = {
	0, "xl", DV_IFNET
};
2725