xref: /openbsd-src/sys/dev/ic/xl.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: xl.c,v 1.131 2016/04/13 10:49:26 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
57  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
58  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
59  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
60  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63  * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64  * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65  * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66  * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67  * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68  * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70  * Dell on-board 3c920 10/100Mbps/RJ-45
71  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72  * Dell Latitude laptop docking station embedded 3c905-TX
73  *
74  * Written by Bill Paul <wpaul@ctr.columbia.edu>
75  * Electrical Engineering Department
76  * Columbia University, New York City
77  */
78 
79 /*
80  * The 3c90x series chips use a bus-master DMA interface for transferring
81  * packets to and from the controller chip. Some of the "vortex" cards
82  * (3c59x) also supported a bus master mode, however for those chips
83  * you could only DMA packets to/from a contiguous memory buffer. For
84  * transmission this would mean copying the contents of the queued mbuf
85  * chain into an mbuf cluster and then DMAing the cluster. This extra
86  * copy would sort of defeat the purpose of the bus master support for
87  * any packet that doesn't fit into a single mbuf.
88  *
89  * By contrast, the 3c90x cards support a fragment-based bus master
90  * mode where mbuf chains can be encapsulated using TX descriptors.
91  * This is similar to other PCI chips such as the Texas Instruments
92  * ThunderLAN and the Intel 82557/82558.
93  *
94  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95  * bus master chips because they maintain the old PIO interface for
96  * backwards compatibility, but starting with the 3c905B and the
97  * "cyclone" chips, the compatibility interface has been dropped.
98  * Since using bus master DMA is a big win, we use this driver to
99  * support the PCI "boomerang" chips even though they work with the
100  * "vortex" driver in order to obtain better performance.
101  */
102 
103 #include "bpfilter.h"
104 
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/mbuf.h>
108 #include <sys/protosw.h>
109 #include <sys/socket.h>
110 #include <sys/ioctl.h>
111 #include <sys/errno.h>
112 #include <sys/malloc.h>
113 #include <sys/kernel.h>
114 #include <sys/device.h>
115 
116 #include <net/if.h>
117 #include <net/if_media.h>
118 
119 #include <netinet/in.h>
120 #include <netinet/if_ether.h>
121 
122 #include <dev/mii/miivar.h>
123 
124 #include <machine/bus.h>
125 
126 #if NBPFILTER > 0
127 #include <net/bpf.h>
128 #endif
129 
130 #include <dev/ic/xlreg.h>
131 
132 /*
133  * TX Checksumming is disabled by default for two reasons:
134  * - TX Checksumming will occasionally produce corrupt packets
135  * - TX Checksumming seems to reduce performance
136  *
137  * Only 905B/C cards were reported to have this problem, it is possible
138  * that later chips _may_ be immune.
139  */
140 #define	XL905B_TXCSUM_BROKEN	1
141 
142 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
143 void xl_stats_update(void *);
144 int xl_encap(struct xl_softc *, struct xl_chain *,
145     struct mbuf * );
146 void xl_rxeof(struct xl_softc *);
147 void xl_txeof(struct xl_softc *);
148 void xl_txeof_90xB(struct xl_softc *);
149 void xl_txeoc(struct xl_softc *);
150 int xl_intr(void *);
151 void xl_start(struct ifnet *);
152 void xl_start_90xB(struct ifnet *);
153 int xl_ioctl(struct ifnet *, u_long, caddr_t);
154 void xl_freetxrx(struct xl_softc *);
155 void xl_watchdog(struct ifnet *);
156 int xl_ifmedia_upd(struct ifnet *);
157 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
158 
159 int xl_eeprom_wait(struct xl_softc *);
160 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
161 void xl_mii_sync(struct xl_softc *);
162 void xl_mii_send(struct xl_softc *, u_int32_t, int);
163 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
164 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
165 
166 void xl_setcfg(struct xl_softc *);
167 void xl_setmode(struct xl_softc *, uint64_t);
168 void xl_iff(struct xl_softc *);
169 void xl_iff_90x(struct xl_softc *);
170 void xl_iff_905b(struct xl_softc *);
171 int xl_list_rx_init(struct xl_softc *);
172 void xl_fill_rx_ring(struct xl_softc *);
173 int xl_list_tx_init(struct xl_softc *);
174 int xl_list_tx_init_90xB(struct xl_softc *);
175 void xl_wait(struct xl_softc *);
176 void xl_mediacheck(struct xl_softc *);
177 void xl_choose_xcvr(struct xl_softc *, int);
178 
179 int xl_miibus_readreg(struct device *, int, int);
180 void xl_miibus_writereg(struct device *, int, int, int);
181 void xl_miibus_statchg(struct device *);
182 #ifndef SMALL_KERNEL
183 int xl_wol(struct ifnet *, int);
184 void xl_wol_power(struct xl_softc *);
185 #endif
186 
187 int
188 xl_activate(struct device *self, int act)
189 {
190 	struct xl_softc *sc = (struct xl_softc *)self;
191 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
192 	int rv = 0;
193 
194 	switch (act) {
195 	case DVACT_SUSPEND:
196 		if (ifp->if_flags & IFF_RUNNING)
197 			xl_stop(sc);
198 		rv = config_activate_children(self, act);
199 		break;
200 	case DVACT_RESUME:
201 		if (ifp->if_flags & IFF_UP)
202 			xl_init(sc);
203 		break;
204 	case DVACT_POWERDOWN:
205 		rv = config_activate_children(self, act);
206 #ifndef SMALL_KERNEL
207 		xl_wol_power(sc);
208 #endif
209 		break;
210 	default:
211 		rv = config_activate_children(self, act);
212 		break;
213 	}
214 	return (rv);
215 }
216 
217 /*
218  * Murphy's law says that it's possible the chip can wedge and
219  * the 'command in progress' bit may never clear. Hence, we wait
220  * only a finite amount of time to avoid getting caught in an
221  * infinite loop. Normally this delay routine would be a macro,
222  * but it isn't called during normal operation so we can afford
223  * to make it a function.
224  */
225 void
226 xl_wait(struct xl_softc *sc)
227 {
228 	int	i;
229 
230 	for (i = 0; i < XL_TIMEOUT; i++) {
231 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
232 			break;
233 	}
234 
235 	if (i == XL_TIMEOUT)
236 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
237 }
238 
239 /*
240  * MII access routines are provided for adapters with external
241  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
242  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
243  * Note: if you don't perform the MDIO operations just right,
244  * it's possible to end up with code that works correctly with
245  * some chips/CPUs/processor speeds/bus speeds/etc but not
246  * with others.
247  */
248 #define MII_SET(x)					\
249 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
250 		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
251 
252 #define MII_CLR(x)					\
253 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
254 		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
255 
256 /*
257  * Sync the PHYs by setting data bit and strobing the clock 32 times.
258  */
259 void
260 xl_mii_sync(struct xl_softc *sc)
261 {
262 	int	i;
263 
264 	XL_SEL_WIN(4);
265 	MII_SET(XL_MII_DIR|XL_MII_DATA);
266 
267 	for (i = 0; i < 32; i++) {
268 		MII_SET(XL_MII_CLK);
269 		MII_SET(XL_MII_DATA);
270 		MII_SET(XL_MII_DATA);
271 		MII_CLR(XL_MII_CLK);
272 		MII_SET(XL_MII_DATA);
273 		MII_SET(XL_MII_DATA);
274 	}
275 }
276 
277 /*
278  * Clock a series of bits through the MII.
279  */
280 void
281 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
282 {
283 	int	i;
284 
285 	XL_SEL_WIN(4);
286 	MII_CLR(XL_MII_CLK);
287 
288 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
289                 if (bits & i) {
290 			MII_SET(XL_MII_DATA);
291                 } else {
292 			MII_CLR(XL_MII_DATA);
293                 }
294 		MII_CLR(XL_MII_CLK);
295 		MII_SET(XL_MII_CLK);
296 	}
297 }
298 
299 /*
300  * Read an PHY register through the MII.
301  */
302 int
303 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
304 {
305 	int	i, ack, s;
306 
307 	s = splnet();
308 
309 	/*
310 	 * Set up frame for RX.
311 	 */
312 	frame->mii_stdelim = XL_MII_STARTDELIM;
313 	frame->mii_opcode = XL_MII_READOP;
314 	frame->mii_turnaround = 0;
315 	frame->mii_data = 0;
316 
317 	/*
318 	 * Select register window 4.
319 	 */
320 
321 	XL_SEL_WIN(4);
322 
323 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
324 	/*
325  	 * Turn on data xmit.
326 	 */
327 	MII_SET(XL_MII_DIR);
328 
329 	xl_mii_sync(sc);
330 
331 	/*
332 	 * Send command/address info.
333 	 */
334 	xl_mii_send(sc, frame->mii_stdelim, 2);
335 	xl_mii_send(sc, frame->mii_opcode, 2);
336 	xl_mii_send(sc, frame->mii_phyaddr, 5);
337 	xl_mii_send(sc, frame->mii_regaddr, 5);
338 
339 	/* Idle bit */
340 	MII_CLR((XL_MII_CLK|XL_MII_DATA));
341 	MII_SET(XL_MII_CLK);
342 
343 	/* Turn off xmit. */
344 	MII_CLR(XL_MII_DIR);
345 
346 	/* Check for ack */
347 	MII_CLR(XL_MII_CLK);
348 	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
349 	MII_SET(XL_MII_CLK);
350 
351 	/*
352 	 * Now try reading data bits. If the ack failed, we still
353 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
354 	 */
355 	if (ack) {
356 		for(i = 0; i < 16; i++) {
357 			MII_CLR(XL_MII_CLK);
358 			MII_SET(XL_MII_CLK);
359 		}
360 		goto fail;
361 	}
362 
363 	for (i = 0x8000; i; i >>= 1) {
364 		MII_CLR(XL_MII_CLK);
365 		if (!ack) {
366 			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
367 				frame->mii_data |= i;
368 		}
369 		MII_SET(XL_MII_CLK);
370 	}
371 
372 fail:
373 
374 	MII_CLR(XL_MII_CLK);
375 	MII_SET(XL_MII_CLK);
376 
377 	splx(s);
378 
379 	if (ack)
380 		return (1);
381 	return (0);
382 }
383 
384 /*
385  * Write to a PHY register through the MII.
386  */
387 int
388 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
389 {
390 	int	s;
391 
392 	s = splnet();
393 
394 	/*
395 	 * Set up frame for TX.
396 	 */
397 
398 	frame->mii_stdelim = XL_MII_STARTDELIM;
399 	frame->mii_opcode = XL_MII_WRITEOP;
400 	frame->mii_turnaround = XL_MII_TURNAROUND;
401 
402 	/*
403 	 * Select the window 4.
404 	 */
405 	XL_SEL_WIN(4);
406 
407 	/*
408  	 * Turn on data output.
409 	 */
410 	MII_SET(XL_MII_DIR);
411 
412 	xl_mii_sync(sc);
413 
414 	xl_mii_send(sc, frame->mii_stdelim, 2);
415 	xl_mii_send(sc, frame->mii_opcode, 2);
416 	xl_mii_send(sc, frame->mii_phyaddr, 5);
417 	xl_mii_send(sc, frame->mii_regaddr, 5);
418 	xl_mii_send(sc, frame->mii_turnaround, 2);
419 	xl_mii_send(sc, frame->mii_data, 16);
420 
421 	/* Idle bit. */
422 	MII_SET(XL_MII_CLK);
423 	MII_CLR(XL_MII_CLK);
424 
425 	/*
426 	 * Turn off xmit.
427 	 */
428 	MII_CLR(XL_MII_DIR);
429 
430 	splx(s);
431 
432 	return (0);
433 }
434 
435 int
436 xl_miibus_readreg(struct device *self, int phy, int reg)
437 {
438 	struct xl_softc *sc = (struct xl_softc *)self;
439 	struct xl_mii_frame	frame;
440 
441 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
442 		return (0);
443 
444 	bzero(&frame, sizeof(frame));
445 
446 	frame.mii_phyaddr = phy;
447 	frame.mii_regaddr = reg;
448 	xl_mii_readreg(sc, &frame);
449 
450 	return (frame.mii_data);
451 }
452 
453 void
454 xl_miibus_writereg(struct device *self, int phy, int reg, int data)
455 {
456 	struct xl_softc *sc = (struct xl_softc *)self;
457 	struct xl_mii_frame	frame;
458 
459 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
460 		return;
461 
462 	bzero(&frame, sizeof(frame));
463 
464 	frame.mii_phyaddr = phy;
465 	frame.mii_regaddr = reg;
466 	frame.mii_data = data;
467 
468 	xl_mii_writereg(sc, &frame);
469 }
470 
471 void
472 xl_miibus_statchg(struct device *self)
473 {
474 	struct xl_softc *sc = (struct xl_softc *)self;
475 
476 	xl_setcfg(sc);
477 
478 	/* Set ASIC's duplex mode to match the PHY. */
479 	XL_SEL_WIN(3);
480 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
481 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
482 	else
483 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
484 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
485 }
486 
487 /*
488  * The EEPROM is slow: give it time to come ready after issuing
489  * it a command.
490  */
491 int
492 xl_eeprom_wait(struct xl_softc *sc)
493 {
494 	int	i;
495 
496 	for (i = 0; i < 100; i++) {
497 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
498 			DELAY(162);
499 		else
500 			break;
501 	}
502 
503 	if (i == 100) {
504 		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
505 		return (1);
506 	}
507 
508 	return (0);
509 }
510 
511 /*
512  * Read a sequence of words from the EEPROM. Note that ethernet address
513  * data is stored in the EEPROM in network byte order.
514  */
515 int
516 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
517 {
518 	int		err = 0, i;
519 	u_int16_t	word = 0, *ptr;
520 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
521 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
522 	/* WARNING! DANGER!
523 	 * It's easy to accidentally overwrite the rom content!
524 	 * Note: the 3c575 uses 8bit EEPROM offsets.
525 	 */
526 	XL_SEL_WIN(0);
527 
528 	if (xl_eeprom_wait(sc))
529 		return (1);
530 
531 	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
532 		off += 0x30;
533 
534 	for (i = 0; i < cnt; i++) {
535 		if (sc->xl_flags & XL_FLAG_8BITROM)
536 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
537 			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
538 		else
539 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
540 			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
541 		err = xl_eeprom_wait(sc);
542 		if (err)
543 			break;
544 		word = CSR_READ_2(sc, XL_W0_EE_DATA);
545 		ptr = (u_int16_t *)(dest + (i * 2));
546 		if (swap)
547 			*ptr = ntohs(word);
548 		else
549 			*ptr = word;
550 	}
551 
552 	return (err ? 1 : 0);
553 }
554 
555 void
556 xl_iff(struct xl_softc *sc)
557 {
558 	if (sc->xl_type == XL_TYPE_905B)
559 		xl_iff_905b(sc);
560 	else
561 		xl_iff_90x(sc);
562 }
563 
564 /*
565  * NICs older than the 3c905B have only one multicast option, which
566  * is to enable reception of all multicast frames.
567  */
568 void
569 xl_iff_90x(struct xl_softc *sc)
570 {
571 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
572 	struct arpcom	*ac = &sc->sc_arpcom;
573 	u_int8_t	rxfilt;
574 
575 	XL_SEL_WIN(5);
576 
577 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
578 	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
579 	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
580 	ifp->if_flags &= ~IFF_ALLMULTI;
581 
582 	/*
583 	 * Always accept broadcast frames.
584 	 * Always accept frames destined to our station address.
585 	 */
586 	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
587 
588 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
589 		ifp->if_flags |= IFF_ALLMULTI;
590 		if (ifp->if_flags & IFF_PROMISC)
591 			rxfilt |= XL_RXFILTER_ALLFRAMES;
592 		else
593 			rxfilt |= XL_RXFILTER_ALLMULTI;
594 	}
595 
596 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
597 
598 	XL_SEL_WIN(7);
599 }
600 
601 /*
602  * 3c905B adapters have a hash filter that we can program.
603  */
604 void
605 xl_iff_905b(struct xl_softc *sc)
606 {
607 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
608 	struct arpcom	*ac = &sc->sc_arpcom;
609 	int		h = 0, i;
610 	struct ether_multi *enm;
611 	struct ether_multistep step;
612 	u_int8_t	rxfilt;
613 
614 	XL_SEL_WIN(5);
615 
616 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
617 	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
618 	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
619 	    XL_RXFILTER_MULTIHASH);
620 	ifp->if_flags &= ~IFF_ALLMULTI;
621 
622 	/*
623 	 * Always accept broadcast frames.
624 	 * Always accept frames destined to our station address.
625 	 */
626 	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
627 
628 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
629 		ifp->if_flags |= IFF_ALLMULTI;
630 		if (ifp->if_flags & IFF_PROMISC)
631 			rxfilt |= XL_RXFILTER_ALLFRAMES;
632 		else
633 			rxfilt |= XL_RXFILTER_ALLMULTI;
634 	} else {
635 		rxfilt |= XL_RXFILTER_MULTIHASH;
636 
637 		/* first, zot all the existing hash bits */
638 		for (i = 0; i < XL_HASHFILT_SIZE; i++)
639 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
640 
641 		/* now program new ones */
642 		ETHER_FIRST_MULTI(step, ac, enm);
643 		while (enm != NULL) {
644 			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
645 			    0x000000FF;
646 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
647 			    XL_HASH_SET | h);
648 
649 			ETHER_NEXT_MULTI(step, enm);
650 		}
651 	}
652 
653 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
654 
655 	XL_SEL_WIN(7);
656 }
657 
658 void
659 xl_setcfg(struct xl_softc *sc)
660 {
661 	u_int32_t icfg;
662 
663 	XL_SEL_WIN(3);
664 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
665 	icfg &= ~XL_ICFG_CONNECTOR_MASK;
666 	if (sc->xl_media & XL_MEDIAOPT_MII ||
667 		sc->xl_media & XL_MEDIAOPT_BT4)
668 		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
669 	if (sc->xl_media & XL_MEDIAOPT_BTX)
670 		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
671 
672 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
673 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
674 }
675 
676 void
677 xl_setmode(struct xl_softc *sc, uint64_t media)
678 {
679 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
680 	u_int32_t icfg;
681 	u_int16_t mediastat;
682 
683 	XL_SEL_WIN(4);
684 	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
685 	XL_SEL_WIN(3);
686 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
687 
688 	if (sc->xl_media & XL_MEDIAOPT_BT) {
689 		if (IFM_SUBTYPE(media) == IFM_10_T) {
690 			ifp->if_baudrate = IF_Mbps(10);
691 			sc->xl_xcvr = XL_XCVR_10BT;
692 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
693 			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
694 			mediastat |= XL_MEDIASTAT_LINKBEAT|
695 					XL_MEDIASTAT_JABGUARD;
696 			mediastat &= ~XL_MEDIASTAT_SQEENB;
697 		}
698 	}
699 
700 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
701 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
702 			ifp->if_baudrate = IF_Mbps(100);
703 			sc->xl_xcvr = XL_XCVR_100BFX;
704 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
705 			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
706 			mediastat |= XL_MEDIASTAT_LINKBEAT;
707 			mediastat &= ~XL_MEDIASTAT_SQEENB;
708 		}
709 	}
710 
711 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
712 		if (IFM_SUBTYPE(media) == IFM_10_5) {
713 			ifp->if_baudrate = IF_Mbps(10);
714 			sc->xl_xcvr = XL_XCVR_AUI;
715 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
716 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
717 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
718 					XL_MEDIASTAT_JABGUARD);
719 			mediastat |= ~XL_MEDIASTAT_SQEENB;
720 		}
721 		if (IFM_SUBTYPE(media) == IFM_10_FL) {
722 			ifp->if_baudrate = IF_Mbps(10);
723 			sc->xl_xcvr = XL_XCVR_AUI;
724 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
725 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
726 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
727 					XL_MEDIASTAT_JABGUARD);
728 			mediastat |= ~XL_MEDIASTAT_SQEENB;
729 		}
730 	}
731 
732 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
733 		if (IFM_SUBTYPE(media) == IFM_10_2) {
734 			ifp->if_baudrate = IF_Mbps(10);
735 			sc->xl_xcvr = XL_XCVR_COAX;
736 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
737 			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
738 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
739 					XL_MEDIASTAT_JABGUARD|
740 					XL_MEDIASTAT_SQEENB);
741 		}
742 	}
743 
744 	if ((media & IFM_GMASK) == IFM_FDX ||
745 			IFM_SUBTYPE(media) == IFM_100_FX) {
746 		XL_SEL_WIN(3);
747 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
748 	} else {
749 		XL_SEL_WIN(3);
750 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
751 			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
752 	}
753 
754 	if (IFM_SUBTYPE(media) == IFM_10_2)
755 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
756 	else
757 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
758 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
759 	XL_SEL_WIN(4);
760 	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
761 	DELAY(800);
762 	XL_SEL_WIN(7);
763 }
764 
765 void
766 xl_reset(struct xl_softc *sc)
767 {
768 	int	i;
769 
770 	XL_SEL_WIN(0);
771 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
772 		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
773 		     XL_RESETOPT_DISADVFD:0));
774 
775 	/*
776 	 * Pause briefly after issuing the reset command before trying
777 	 * to access any other registers. With my 3c575C cardbus card,
778 	 * failing to do this results in the system locking up while
779 	 * trying to poll the command busy bit in the status register.
780 	 */
781 	DELAY(100000);
782 
783 	for (i = 0; i < XL_TIMEOUT; i++) {
784 		DELAY(10);
785 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
786 			break;
787 	}
788 
789 	if (i == XL_TIMEOUT)
790 		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);
791 
792 	/* Note: the RX reset takes an absurd amount of time
793 	 * on newer versions of the Tornado chips such as those
794 	 * on the 3c905CX and newer 3c908C cards. We wait an
795 	 * extra amount of time so that xl_wait() doesn't complain
796 	 * and annoy the users.
797 	 */
798 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
799 	DELAY(100000);
800 	xl_wait(sc);
801 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
802 	xl_wait(sc);
803 
804 	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
805 	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
806 		XL_SEL_WIN(2);
807 		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
808 		    XL_W2_RESET_OPTIONS)
809 		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
810 		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
811 		    );
812 	}
813 
814 	/* Wait a little while for the chip to get its brains in order. */
815 	DELAY(100000);
816 }
817 
818 /*
819  * This routine is a kludge to work around possible hardware faults
820  * or manufacturing defects that can cause the media options register
821  * (or reset options register, as it's called for the first generation
822  * 3c90x adapters) to return an incorrect result. I have encountered
823  * one Dell Latitude laptop docking station with an integrated 3c905-TX
824  * which doesn't have any of the 'mediaopt' bits set. This screws up
825  * the attach routine pretty badly because it doesn't know what media
826  * to look for. If we find ourselves in this predicament, this routine
827  * will try to guess the media options values and warn the user of a
828  * possible manufacturing defect with his adapter/system/whatever.
829  */
830 void
831 xl_mediacheck(struct xl_softc *sc)
832 {
833 	/*
834 	 * If some of the media options bits are set, assume they are
835 	 * correct. If not, try to figure it out down below.
836 	 * XXX I should check for 10baseFL, but I don't have an adapter
837 	 * to test with.
838 	 */
839 	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
840 		/*
841 	 	 * Check the XCVR value. If it's not in the normal range
842 	 	 * of values, we need to fake it up here.
843 	 	 */
844 		if (sc->xl_xcvr <= XL_XCVR_AUTO)
845 			return;
846 		else {
847 			printf("%s: bogus xcvr value "
848 			"in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
849 			printf("%s: choosing new default based "
850 				"on card type\n", sc->sc_dev.dv_xname);
851 		}
852 	} else {
853 		if (sc->xl_type == XL_TYPE_905B &&
854 		    sc->xl_media & XL_MEDIAOPT_10FL)
855 			return;
856 		printf("%s: WARNING: no media options bits set in "
857 			"the media options register!!\n", sc->sc_dev.dv_xname);
858 		printf("%s: this could be a manufacturing defect in "
859 			"your adapter or system\n", sc->sc_dev.dv_xname);
860 		printf("%s: attempting to guess media type; you "
861 			"should probably consult your vendor\n", sc->sc_dev.dv_xname);
862 	}
863 
864 	xl_choose_xcvr(sc, 1);
865 }
866 
867 void
868 xl_choose_xcvr(struct xl_softc *sc, int verbose)
869 {
870 	u_int16_t devid;
871 
872 	/*
873 	 * Read the device ID from the EEPROM.
874 	 * This is what's loaded into the PCI device ID register, so it has
875 	 * to be correct otherwise we wouldn't have gotten this far.
876 	 */
877 	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
878 
879 	switch(devid) {
880 	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
881 	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
882 		sc->xl_media = XL_MEDIAOPT_BT;
883 		sc->xl_xcvr = XL_XCVR_10BT;
884 		if (verbose)
885 			printf("%s: guessing 10BaseT transceiver\n",
886 			    sc->sc_dev.dv_xname);
887 		break;
888 	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
889 	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
890 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
891 		sc->xl_xcvr = XL_XCVR_10BT;
892 		if (verbose)
893 			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
894 			    sc->sc_dev.dv_xname);
895 		break;
896 	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
897 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
898 		sc->xl_xcvr = XL_XCVR_10BT;
899 		if (verbose)
900 			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
901 		break;
902 	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
903 		sc->xl_media = XL_MEDIAOPT_10FL;
904 		sc->xl_xcvr = XL_XCVR_AUI;
905 		if (verbose)
906 			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
907 		break;
908 	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
909 	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
910 	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
911 	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
912 	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
913 	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
914 	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
915 	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
916 	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
917 	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
918 	case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
919 		sc->xl_media = XL_MEDIAOPT_MII;
920 		sc->xl_xcvr = XL_XCVR_MII;
921 		if (verbose)
922 			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
923 		break;
924 	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
925 	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
926 		sc->xl_media = XL_MEDIAOPT_BT4;
927 		sc->xl_xcvr = XL_XCVR_MII;
928 		if (verbose)
929 			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
930 		break;
931 	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
932 	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
933 	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
934 	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
935 	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
936 	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
937 		sc->xl_media = XL_MEDIAOPT_BTX;
938 		sc->xl_xcvr = XL_XCVR_AUTO;
939 		if (verbose)
940 			printf("%s: guessing 10/100 internal\n",
941 			    sc->sc_dev.dv_xname);
942 		break;
943 	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
944 		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
945 		sc->xl_xcvr = XL_XCVR_AUTO;
946 		if (verbose)
947 			printf("%s: guessing 10/100 plus BNC/AUI\n",
948 			    sc->sc_dev.dv_xname);
949 		break;
950 	default:
951 		printf("%s: unknown device ID: %x -- "
952 			"defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
953 		sc->xl_media = XL_MEDIAOPT_BT;
954 		break;
955 	}
956 }
957 
958 /*
959  * Initialize the transmit descriptors.
960  */
961 int
962 xl_list_tx_init(struct xl_softc *sc)
963 {
964 	struct xl_chain_data	*cd;
965 	struct xl_list_data	*ld;
966 	int			i;
967 
968 	cd = &sc->xl_cdata;
969 	ld = sc->xl_ldata;
970 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
971 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
972 		if (i == (XL_TX_LIST_CNT - 1))
973 			cd->xl_tx_chain[i].xl_next = NULL;
974 		else
975 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
976 	}
977 
978 	cd->xl_tx_free = &cd->xl_tx_chain[0];
979 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
980 
981 	return (0);
982 }
983 
984 /*
985  * Initialize the transmit descriptors.
986  */
987 int
988 xl_list_tx_init_90xB(struct xl_softc *sc)
989 {
990 	struct xl_chain_data	*cd;
991 	struct xl_list_data	*ld;
992 	int			i, next, prev;
993 
994 	cd = &sc->xl_cdata;
995 	ld = sc->xl_ldata;
996 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
997 		if (i == (XL_TX_LIST_CNT - 1))
998 			next = 0;
999 		else
1000 			next = i + 1;
1001 		if (i == 0)
1002 			prev = XL_TX_LIST_CNT - 1;
1003 		else
1004 			prev = i - 1;
1005 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1006 		cd->xl_tx_chain[i].xl_phys =
1007 		    sc->sc_listmap->dm_segs[0].ds_addr +
1008 		    offsetof(struct xl_list_data, xl_tx_list[i]);
1009 		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1010 		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1011 	}
1012 
1013 	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1014 	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1015 
1016 	cd->xl_tx_prod = 1;
1017 	cd->xl_tx_cons = 1;
1018 	cd->xl_tx_cnt = 0;
1019 
1020 	return (0);
1021 }
1022 
1023 /*
1024  * Initialize the RX descriptors and allocate mbufs for them. Note that
1025  * we arrange the descriptors in a closed ring, so that the last descriptor
1026  * points back to the first.
1027  */
1028 int
1029 xl_list_rx_init(struct xl_softc *sc)
1030 {
1031 	struct xl_chain_data	*cd;
1032 	struct xl_list_data	*ld;
1033 	int			i, n;
1034 	bus_addr_t		next;
1035 
1036 	cd = &sc->xl_cdata;
1037 	ld = sc->xl_ldata;
1038 
1039 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1040 		cd->xl_rx_chain[i].xl_ptr =
1041 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1042 		if (i == (XL_RX_LIST_CNT - 1))
1043 			n = 0;
1044 		else
1045 			n = i + 1;
1046 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1047 		next = sc->sc_listmap->dm_segs[0].ds_addr +
1048 		       offsetof(struct xl_list_data, xl_rx_list[n]);
1049 		ld->xl_rx_list[i].xl_next = htole32(next);
1050 	}
1051 
1052 	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
1053 	if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1);
1054 	xl_fill_rx_ring(sc);
1055 	return (0);
1056 }
1057 
1058 void
1059 xl_fill_rx_ring(struct xl_softc *sc)
1060 {
1061 	struct xl_chain_data    *cd;
1062 	u_int			slots;
1063 
1064 	cd = &sc->xl_cdata;
1065 
1066 	for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT);
1067 	     slots > 0; slots--) {
1068 		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1069 			break;
1070 		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1071 	}
1072 	if_rxr_put(&cd->xl_rx_ring, slots);
1073 }
1074 
1075 /*
1076  * Initialize an RX descriptor and attach an MBUF cluster.
1077  */
1078 int
1079 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1080 {
1081 	struct mbuf	*m_new = NULL;
1082 	bus_dmamap_t	map;
1083 
1084 	m_new = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1085 	if (!m_new)
1086 		return (ENOBUFS);
1087 
1088 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1089 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1090 	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
1091 		m_freem(m_new);
1092 		return (ENOBUFS);
1093 	}
1094 
1095 	/* sync the old map, and unload it (if necessary) */
1096 	if (c->map->dm_nsegs != 0) {
1097 		bus_dmamap_sync(sc->sc_dmat, c->map,
1098 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1099 		bus_dmamap_unload(sc->sc_dmat, c->map);
1100 	}
1101 
1102 	map = c->map;
1103 	c->map = sc->sc_rx_sparemap;
1104 	sc->sc_rx_sparemap = map;
1105 
1106 	/* Force longword alignment for packet payload. */
1107 	m_adj(m_new, ETHER_ALIGN);
1108 
1109 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1110 	    BUS_DMASYNC_PREREAD);
1111 
1112 	c->xl_mbuf = m_new;
1113 	c->xl_ptr->xl_frag.xl_addr =
1114 	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
1115 	c->xl_ptr->xl_frag.xl_len =
1116 	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
1117 	c->xl_ptr->xl_status = htole32(0);
1118 
1119 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1120 	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
1121 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1122 
1123 	return (0);
1124 }
1125 
1126 /*
1127  * A frame has been uploaded: pass the resulting mbuf chain up to
1128  * the higher level protocols.
1129  */
1130 void
1131 xl_rxeof(struct xl_softc *sc)
1132 {
1133 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1134         struct mbuf		*m;
1135         struct ifnet		*ifp;
1136 	struct xl_chain_onefrag	*cur_rx;
1137 	int			total_len = 0;
1138 	u_int32_t		rxstat;
1139 	u_int16_t		sumflags = 0;
1140 
1141 	ifp = &sc->sc_arpcom.ac_if;
1142 
1143 again:
1144 
1145 	while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) {
1146 		cur_rx = sc->xl_cdata.xl_rx_cons;
1147 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1148 		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1149 		    sizeof(struct xl_list),
1150 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1151 		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
1152 			break;
1153 		m = cur_rx->xl_mbuf;
1154 		cur_rx->xl_mbuf = NULL;
1155 		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
1156 		if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1);
1157 		total_len = rxstat & XL_RXSTAT_LENMASK;
1158 
1159 		/*
1160 		 * Since we have told the chip to allow large frames,
1161 		 * we need to trap giant frame errors in software. We allow
1162 		 * a little more than the normal frame size to account for
1163 		 * frames with VLAN tags.
1164 		 */
1165 		if (total_len > XL_MAX_FRAMELEN)
1166 			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1167 
1168 		/*
1169 		 * If an error occurs, update stats, clear the
1170 		 * status word and leave the mbuf cluster in place:
1171 		 * it should simply get re-used next time this descriptor
1172 	 	 * comes up in the ring.
1173 		 */
1174 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1175 			ifp->if_ierrors++;
1176 			cur_rx->xl_ptr->xl_status = htole32(0);
1177 			m_freem(m);
1178 			continue;
1179 		}
1180 
1181 		/*
1182 		 * If the error bit was not set, the upload complete
1183 		 * bit should be set which means we have a valid packet.
1184 		 * If not, something truly strange has happened.
1185 		 */
1186 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1187 			printf("%s: bad receive status -- "
1188 			    "packet dropped\n", sc->sc_dev.dv_xname);
1189 			ifp->if_ierrors++;
1190 			cur_rx->xl_ptr->xl_status = htole32(0);
1191 			m_freem(m);
1192 			continue;
1193 		}
1194 
1195 		m->m_pkthdr.len = m->m_len = total_len;
1196 
1197 		if (sc->xl_type == XL_TYPE_905B) {
1198 			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1199 			    (rxstat & XL_RXSTAT_IPCKOK))
1200 				sumflags |= M_IPV4_CSUM_IN_OK;
1201 
1202 			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1203 			    (rxstat & XL_RXSTAT_TCPCKOK))
1204 				sumflags |= M_TCP_CSUM_IN_OK;
1205 
1206 			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1207 			    (rxstat & XL_RXSTAT_UDPCKOK))
1208 				sumflags |= M_UDP_CSUM_IN_OK;
1209 
1210 			m->m_pkthdr.csum_flags = sumflags;
1211 		}
1212 
1213 		ml_enqueue(&ml, m);
1214 	}
1215 
1216 	xl_fill_rx_ring(sc);
1217 
1218 	/*
1219 	 * Handle the 'end of channel' condition. When the upload
1220 	 * engine hits the end of the RX ring, it will stall. This
1221 	 * is our cue to flush the RX ring, reload the uplist pointer
1222 	 * register and unstall the engine.
1223 	 * XXX This is actually a little goofy. With the ThunderLAN
1224 	 * chip, you get an interrupt when the receiver hits the end
1225 	 * of the receive ring, which tells you exactly when you
1226 	 * you need to reload the ring pointer. Here we have to
1227 	 * fake it. I'm mad at myself for not being clever enough
1228 	 * to avoid the use of a goto here.
1229 	 */
1230 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1231 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1232 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1233 		xl_wait(sc);
1234 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1235 		xl_fill_rx_ring(sc);
1236 		goto again;
1237 	}
1238 
1239 	if_input(ifp, &ml);
1240 }
1241 
1242 /*
1243  * A frame was downloaded to the chip. It's safe for us to clean up
1244  * the list buffers.
1245  */
1246 void
1247 xl_txeof(struct xl_softc *sc)
1248 {
1249 	struct xl_chain		*cur_tx;
1250 	struct ifnet		*ifp;
1251 
1252 	ifp = &sc->sc_arpcom.ac_if;
1253 
1254 	/*
1255 	 * Go through our tx list and free mbufs for those
1256 	 * frames that have been uploaded. Note: the 3c905B
1257 	 * sets a special bit in the status word to let us
1258 	 * know that a frame has been downloaded, but the
1259 	 * original 3c900/3c905 adapters don't do that.
1260 	 * Consequently, we have to use a different test if
1261 	 * xl_type != XL_TYPE_905B.
1262 	 */
1263 	while (sc->xl_cdata.xl_tx_head != NULL) {
1264 		cur_tx = sc->xl_cdata.xl_tx_head;
1265 
1266 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1267 		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
1268 		    sizeof(struct xl_list),
1269 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1270 
1271 		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1272 			break;
1273 
1274 		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1275 		ifp->if_opackets++;
1276 		if (cur_tx->map->dm_nsegs != 0) {
1277 			bus_dmamap_t map = cur_tx->map;
1278 
1279 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1280 			    BUS_DMASYNC_POSTWRITE);
1281 			bus_dmamap_unload(sc->sc_dmat, map);
1282 		}
1283 		if (cur_tx->xl_mbuf != NULL) {
1284 			m_freem(cur_tx->xl_mbuf);
1285 			cur_tx->xl_mbuf = NULL;
1286 		}
1287 		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1288 		sc->xl_cdata.xl_tx_free = cur_tx;
1289 	}
1290 
1291 	if (sc->xl_cdata.xl_tx_head == NULL) {
1292 		ifq_clr_oactive(&ifp->if_snd);
1293 		/* Clear the timeout timer. */
1294 		ifp->if_timer = 0;
1295 		sc->xl_cdata.xl_tx_tail = NULL;
1296 	} else {
1297 		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1298 			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1299 			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1300 			    sc->sc_listmap->dm_segs[0].ds_addr +
1301 			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1302 			    sc->sc_listkva));
1303 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1304 		}
1305 	}
1306 }
1307 
1308 void
1309 xl_txeof_90xB(struct xl_softc *sc)
1310 {
1311 	struct xl_chain *cur_tx = NULL;
1312 	struct ifnet *ifp;
1313 	int idx;
1314 
1315 	ifp = &sc->sc_arpcom.ac_if;
1316 
1317 	idx = sc->xl_cdata.xl_tx_cons;
1318 	while (idx != sc->xl_cdata.xl_tx_prod) {
1319 
1320 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1321 
1322 		if ((cur_tx->xl_ptr->xl_status &
1323 		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
1324 			break;
1325 
1326 		if (cur_tx->xl_mbuf != NULL) {
1327 			m_freem(cur_tx->xl_mbuf);
1328 			cur_tx->xl_mbuf = NULL;
1329 		}
1330 
1331 		if (cur_tx->map->dm_nsegs != 0) {
1332 			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
1333 			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1334 			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
1335 		}
1336 
1337 		ifp->if_opackets++;
1338 
1339 		sc->xl_cdata.xl_tx_cnt--;
1340 		XL_INC(idx, XL_TX_LIST_CNT);
1341 	}
1342 
1343 	sc->xl_cdata.xl_tx_cons = idx;
1344 
1345 	if (cur_tx != NULL)
1346 		ifq_clr_oactive(&ifp->if_snd);
1347 	if (sc->xl_cdata.xl_tx_cnt == 0)
1348 		ifp->if_timer = 0;
1349 }
1350 
1351 /*
1352  * TX 'end of channel' interrupt handler. Actually, we should
1353  * only get a 'TX complete' interrupt if there's a transmit error,
1354  * so this is really TX error handler.
1355  */
1356 void
1357 xl_txeoc(struct xl_softc *sc)
1358 {
1359 	u_int8_t	txstat;
1360 
1361 	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1362 		if (txstat & XL_TXSTATUS_UNDERRUN ||
1363 			txstat & XL_TXSTATUS_JABBER ||
1364 			txstat & XL_TXSTATUS_RECLAIM) {
1365 			if (txstat != 0x90) {
1366 				printf("%s: transmission error: %x\n",
1367 				    sc->sc_dev.dv_xname, txstat);
1368 			}
1369 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1370 			xl_wait(sc);
1371 			if (sc->xl_type == XL_TYPE_905B) {
1372 				if (sc->xl_cdata.xl_tx_cnt) {
1373 					int i;
1374 					struct xl_chain *c;
1375 
1376 					i = sc->xl_cdata.xl_tx_cons;
1377 					c = &sc->xl_cdata.xl_tx_chain[i];
1378 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1379 					    c->xl_phys);
1380 					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1381 				}
1382 			} else {
1383 				if (sc->xl_cdata.xl_tx_head != NULL)
1384 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1385 					    sc->sc_listmap->dm_segs[0].ds_addr +
1386 					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1387 					    sc->sc_listkva));
1388 			}
1389 			/*
1390 			 * Remember to set this for the
1391 			 * first generation 3c90X chips.
1392 			 */
1393 			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1394 			if (txstat & XL_TXSTATUS_UNDERRUN &&
1395 			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
1396 				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1397 #ifdef notdef
1398 				printf("%s: tx underrun, increasing tx start"
1399 				    " threshold to %d\n", sc->sc_dev.dv_xname,
1400 				    sc->xl_tx_thresh);
1401 #endif
1402 			}
1403 			CSR_WRITE_2(sc, XL_COMMAND,
1404 			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1405 			if (sc->xl_type == XL_TYPE_905B) {
1406 				CSR_WRITE_2(sc, XL_COMMAND,
1407 				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1408 			}
1409 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1410 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1411 		} else {
1412 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1413 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1414 		}
1415 		/*
1416 		 * Write an arbitrary byte to the TX_STATUS register
1417 	 	 * to clear this interrupt/error and advance to the next.
1418 		 */
1419 		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1420 	}
1421 }
1422 
1423 int
1424 xl_intr(void *arg)
1425 {
1426 	struct xl_softc		*sc;
1427 	struct ifnet		*ifp;
1428 	u_int16_t		status;
1429 	int			claimed = 0;
1430 
1431 	sc = arg;
1432 	ifp = &sc->sc_arpcom.ac_if;
1433 
1434 	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {
1435 
1436 		claimed = 1;
1437 
1438 		CSR_WRITE_2(sc, XL_COMMAND,
1439 		    XL_CMD_INTR_ACK|(status & XL_INTRS));
1440 
1441 		if (sc->intr_ack)
1442 			(*sc->intr_ack)(sc);
1443 
1444 		if (!(ifp->if_flags & IFF_RUNNING))
1445 			return (claimed);
1446 
1447 		if (status & XL_STAT_UP_COMPLETE)
1448 			xl_rxeof(sc);
1449 
1450 		if (status & XL_STAT_DOWN_COMPLETE) {
1451 			if (sc->xl_type == XL_TYPE_905B)
1452 				xl_txeof_90xB(sc);
1453 			else
1454 				xl_txeof(sc);
1455 		}
1456 
1457 		if (status & XL_STAT_TX_COMPLETE) {
1458 			ifp->if_oerrors++;
1459 			xl_txeoc(sc);
1460 		}
1461 
1462 		if (status & XL_STAT_ADFAIL)
1463 			xl_init(sc);
1464 
1465 		if (status & XL_STAT_STATSOFLOW) {
1466 			sc->xl_stats_no_timeout = 1;
1467 			xl_stats_update(sc);
1468 			sc->xl_stats_no_timeout = 0;
1469 		}
1470 	}
1471 
1472 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1473 		(*ifp->if_start)(ifp);
1474 
1475 	return (claimed);
1476 }
1477 
1478 void
1479 xl_stats_update(void *xsc)
1480 {
1481 	struct xl_softc		*sc;
1482 	struct ifnet		*ifp;
1483 	struct xl_stats		xl_stats;
1484 	u_int8_t		*p;
1485 	int			i;
1486 	struct mii_data		*mii = NULL;
1487 
1488 	bzero(&xl_stats, sizeof(struct xl_stats));
1489 
1490 	sc = xsc;
1491 	ifp = &sc->sc_arpcom.ac_if;
1492 	if (sc->xl_hasmii)
1493 		mii = &sc->sc_mii;
1494 
1495 	p = (u_int8_t *)&xl_stats;
1496 
1497 	/* Read all the stats registers. */
1498 	XL_SEL_WIN(6);
1499 
1500 	for (i = 0; i < 16; i++)
1501 		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
1502 
1503 	ifp->if_ierrors += xl_stats.xl_rx_overrun;
1504 
1505 	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
1506 				xl_stats.xl_tx_single_collision +
1507 				xl_stats.xl_tx_late_collision;
1508 
1509 	/*
1510 	 * Boomerang and cyclone chips have an extra stats counter
1511 	 * in window 4 (BadSSD). We have to read this too in order
1512 	 * to clear out all the stats registers and avoid a statsoflow
1513 	 * interrupt.
1514 	 */
1515 	XL_SEL_WIN(4);
1516 	CSR_READ_1(sc, XL_W4_BADSSD);
1517 
1518 	if (mii != NULL && (!sc->xl_stats_no_timeout))
1519 		mii_tick(mii);
1520 
1521 	XL_SEL_WIN(7);
1522 
1523 	if (!sc->xl_stats_no_timeout)
1524 		timeout_add_sec(&sc->xl_stsup_tmo, 1);
1525 }
1526 
1527 /*
1528  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1529  * pointers to the fragment pointers.
1530  */
1531 int
1532 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
1533 {
1534 	int		error, frag, total_len;
1535 	u_int32_t	status;
1536 	bus_dmamap_t	map;
1537 
1538 	map = sc->sc_tx_sparemap;
1539 
1540 reload:
1541 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1542 	    m_head, BUS_DMA_NOWAIT);
1543 
1544 	if (error && error != EFBIG) {
1545 		m_freem(m_head);
1546 		return (1);
1547 	}
1548 
1549 	/*
1550  	 * Start packing the mbufs in this chain into
1551 	 * the fragment pointers. Stop when we run out
1552  	 * of fragments or hit the end of the mbuf chain.
1553 	 */
1554 	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
1555 		if (frag == XL_MAXFRAGS)
1556 			break;
1557 		total_len += map->dm_segs[frag].ds_len;
1558 		c->xl_ptr->xl_frag[frag].xl_addr =
1559 		    htole32(map->dm_segs[frag].ds_addr);
1560 		c->xl_ptr->xl_frag[frag].xl_len =
1561 		    htole32(map->dm_segs[frag].ds_len);
1562 	}
1563 
1564 	/*
1565 	 * Handle special case: we used up all 63 fragments,
1566 	 * but we have more mbufs left in the chain. Copy the
1567 	 * data into an mbuf cluster. Note that we don't
1568 	 * bother clearing the values in the other fragment
1569 	 * pointers/counters; it wouldn't gain us anything,
1570 	 * and would waste cycles.
1571 	 */
1572 	if (error) {
1573 		struct mbuf	*m_new = NULL;
1574 
1575 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1576 		if (m_new == NULL) {
1577 			m_freem(m_head);
1578 			return (1);
1579 		}
1580 		if (m_head->m_pkthdr.len > MHLEN) {
1581 			MCLGET(m_new, M_DONTWAIT);
1582 			if (!(m_new->m_flags & M_EXT)) {
1583 				m_freem(m_new);
1584 				m_freem(m_head);
1585 				return (1);
1586 			}
1587 		}
1588 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1589 		    mtod(m_new, caddr_t));
1590 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1591 		m_freem(m_head);
1592 		m_head = m_new;
1593 		goto reload;
1594 	}
1595 
1596 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1597 	    BUS_DMASYNC_PREWRITE);
1598 
1599 	if (c->map->dm_nsegs != 0) {
1600 		bus_dmamap_sync(sc->sc_dmat, c->map,
1601 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1602 		bus_dmamap_unload(sc->sc_dmat, c->map);
1603 	}
1604 
1605 	c->xl_mbuf = m_head;
1606 	sc->sc_tx_sparemap = c->map;
1607 	c->map = map;
1608 	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
1609 	c->xl_ptr->xl_status = htole32(total_len);
1610 	c->xl_ptr->xl_next = 0;
1611 
1612 	if (sc->xl_type == XL_TYPE_905B) {
1613 		status = XL_TXSTAT_RND_DEFEAT;
1614 
1615 #ifndef XL905B_TXCSUM_BROKEN
1616 		if (m_head->m_pkthdr.csum_flags) {
1617 			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1618 				status |= XL_TXSTAT_IPCKSUM;
1619 			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1620 				status |= XL_TXSTAT_TCPCKSUM;
1621 			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1622 				status |= XL_TXSTAT_UDPCKSUM;
1623 		}
1624 #endif
1625 		c->xl_ptr->xl_status = htole32(status);
1626 	}
1627 
1628 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1629 	    offsetof(struct xl_list_data, xl_tx_list[0]),
1630 	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
1631 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1632 
1633 	return (0);
1634 }
1635 
1636 /*
1637  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1638  * to the mbuf data regions directly in the transmit lists. We also save a
1639  * copy of the pointers since the transmit list fragment pointers are
1640  * physical addresses.
1641  */
1642 void
1643 xl_start(struct ifnet *ifp)
1644 {
1645 	struct xl_softc		*sc;
1646 	struct mbuf		*m_head = NULL;
1647 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1648 	struct xl_chain		*prev_tx;
1649 	int			error;
1650 
1651 	sc = ifp->if_softc;
1652 
1653 	/*
1654 	 * Check for an available queue slot. If there are none,
1655 	 * punt.
1656 	 */
1657 	if (sc->xl_cdata.xl_tx_free == NULL) {
1658 		xl_txeoc(sc);
1659 		xl_txeof(sc);
1660 		if (sc->xl_cdata.xl_tx_free == NULL) {
1661 			ifq_set_oactive(&ifp->if_snd);
1662 			return;
1663 		}
1664 	}
1665 
1666 	start_tx = sc->xl_cdata.xl_tx_free;
1667 
1668 	while (sc->xl_cdata.xl_tx_free != NULL) {
1669 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1670 		if (m_head == NULL)
1671 			break;
1672 
1673 		/* Pick a descriptor off the free list. */
1674 		prev_tx = cur_tx;
1675 		cur_tx = sc->xl_cdata.xl_tx_free;
1676 
1677 		/* Pack the data into the descriptor. */
1678 		error = xl_encap(sc, cur_tx, m_head);
1679 		if (error) {
1680 			cur_tx = prev_tx;
1681 			continue;
1682 		}
1683 
1684 		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1685 		cur_tx->xl_next = NULL;
1686 
1687 		/* Chain it together. */
1688 		if (prev != NULL) {
1689 			prev->xl_next = cur_tx;
1690 			prev->xl_ptr->xl_next =
1691 			    sc->sc_listmap->dm_segs[0].ds_addr +
1692 			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
1693 
1694 		}
1695 		prev = cur_tx;
1696 
1697 #if NBPFILTER > 0
1698 		/*
1699 		 * If there's a BPF listener, bounce a copy of this frame
1700 		 * to him.
1701 		 */
1702 		if (ifp->if_bpf)
1703 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1704 			    BPF_DIRECTION_OUT);
1705 #endif
1706 	}
1707 
1708 	/*
1709 	 * If there are no packets queued, bail.
1710 	 */
1711 	if (cur_tx == NULL)
1712 		return;
1713 
1714 	/*
1715 	 * Place the request for the upload interrupt
1716 	 * in the last descriptor in the chain. This way, if
1717 	 * we're chaining several packets at once, we'll only
1718 	 * get an interrupt once for the whole chain rather than
1719 	 * once for each packet.
1720 	 */
1721 	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1722 
1723 	/*
1724 	 * Queue the packets. If the TX channel is clear, update
1725 	 * the downlist pointer register.
1726 	 */
1727 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1728 	xl_wait(sc);
1729 
1730 	if (sc->xl_cdata.xl_tx_head != NULL) {
1731 		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1732 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1733 		    sc->sc_listmap->dm_segs[0].ds_addr +
1734 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
1735 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1736 		    htole32(~XL_TXSTAT_DL_INTR);
1737 		sc->xl_cdata.xl_tx_tail = cur_tx;
1738 	} else {
1739 		sc->xl_cdata.xl_tx_head = start_tx;
1740 		sc->xl_cdata.xl_tx_tail = cur_tx;
1741 	}
1742 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1743 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1744 		    sc->sc_listmap->dm_segs[0].ds_addr +
1745 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));
1746 
1747 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1748 
1749 	XL_SEL_WIN(7);
1750 
1751 	/*
1752 	 * Set a timeout in case the chip goes out to lunch.
1753 	 */
1754 	ifp->if_timer = 5;
1755 
1756 	/*
1757 	 * XXX Under certain conditions, usually on slower machines
1758 	 * where interrupts may be dropped, it's possible for the
1759 	 * adapter to chew up all the buffers in the receive ring
1760 	 * and stall, without us being able to do anything about it.
1761 	 * To guard against this, we need to make a pass over the
1762 	 * RX queue to make sure there aren't any packets pending.
1763 	 * Doing it here means we can flush the receive ring at the
1764 	 * same time the chip is DMAing the transmit descriptors we
1765 	 * just gave it.
1766  	 *
1767 	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1768 	 * nature of their chips in all their marketing literature;
1769 	 * we may as well take advantage of it. :)
1770 	 */
1771 	xl_rxeof(sc);
1772 }
1773 
1774 void
1775 xl_start_90xB(struct ifnet *ifp)
1776 {
1777 	struct xl_softc	*sc;
1778 	struct mbuf	*m_head = NULL;
1779 	struct xl_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
1780 	struct xl_chain	*prev_tx;
1781 	int		error, idx;
1782 
1783 	sc = ifp->if_softc;
1784 
1785 	if (ifq_is_oactive(&ifp->if_snd))
1786 		return;
1787 
1788 	idx = sc->xl_cdata.xl_tx_prod;
1789 	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
1790 
1791 	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
1792 
1793 		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
1794 			ifq_set_oactive(&ifp->if_snd);
1795 			break;
1796 		}
1797 
1798 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1799 		if (m_head == NULL)
1800 			break;
1801 
1802 		prev_tx = cur_tx;
1803 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1804 
1805 		/* Pack the data into the descriptor. */
1806 		error = xl_encap(sc, cur_tx, m_head);
1807 		if (error) {
1808 			cur_tx = prev_tx;
1809 			continue;
1810 		}
1811 
1812 		/* Chain it together. */
1813 		if (prev != NULL)
1814 			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
1815 		prev = cur_tx;
1816 
1817 #if NBPFILTER > 0
1818 		/*
1819 		 * If there's a BPF listener, bounce a copy of this frame
1820 		 * to him.
1821 		 */
1822 		if (ifp->if_bpf)
1823 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1824 			    BPF_DIRECTION_OUT);
1825 #endif
1826 
1827 		XL_INC(idx, XL_TX_LIST_CNT);
1828 		sc->xl_cdata.xl_tx_cnt++;
1829 	}
1830 
1831 	/*
1832 	 * If there are no packets queued, bail.
1833 	 */
1834 	if (cur_tx == NULL)
1835 		return;
1836 
1837 	/*
1838 	 * Place the request for the upload interrupt
1839 	 * in the last descriptor in the chain. This way, if
1840 	 * we're chaining several packets at once, we'll only
1841 	 * get an interrupt once for the whole chain rather than
1842 	 * once for each packet.
1843 	 */
1844 	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1845 
1846 	/* Start transmission */
1847 	sc->xl_cdata.xl_tx_prod = idx;
1848 	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
1849 
1850 	/*
1851 	 * Set a timeout in case the chip goes out to lunch.
1852 	 */
1853 	ifp->if_timer = 5;
1854 }
1855 
1856 void
1857 xl_init(void *xsc)
1858 {
1859 	struct xl_softc		*sc = xsc;
1860 	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
1861 	int			s, i;
1862 	struct mii_data		*mii = NULL;
1863 
1864 	s = splnet();
1865 
1866 	/*
1867 	 * Cancel pending I/O and free all RX/TX buffers.
1868 	 */
1869 	xl_stop(sc);
1870 
1871 	/* Reset the chip to a known state. */
1872 	xl_reset(sc);
1873 
1874 	if (sc->xl_hasmii)
1875 		mii = &sc->sc_mii;
1876 
1877 	if (mii == NULL) {
1878 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1879 		xl_wait(sc);
1880 	}
1881 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1882 	xl_wait(sc);
1883 	DELAY(10000);
1884 
1885 	/* Init our MAC address */
1886 	XL_SEL_WIN(2);
1887 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1888 		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
1889 				sc->sc_arpcom.ac_enaddr[i]);
1890 	}
1891 
1892 	/* Clear the station mask. */
1893 	for (i = 0; i < 3; i++)
1894 		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
1895 #ifdef notdef
1896 	/* Reset TX and RX. */
1897 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1898 	xl_wait(sc);
1899 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1900 	xl_wait(sc);
1901 #endif
1902 	/* Init circular RX list. */
1903 	if (xl_list_rx_init(sc) == ENOBUFS) {
1904 		printf("%s: initialization failed: no "
1905 			"memory for rx buffers\n", sc->sc_dev.dv_xname);
1906 		xl_stop(sc);
1907 		splx(s);
1908 		return;
1909 	}
1910 
1911 	/* Init TX descriptors. */
1912 	if (sc->xl_type == XL_TYPE_905B)
1913 		xl_list_tx_init_90xB(sc);
1914 	else
1915 		xl_list_tx_init(sc);
1916 
1917 	/*
1918 	 * Set the TX freethresh value.
1919 	 * Note that this has no effect on 3c905B "cyclone"
1920 	 * cards but is required for 3c900/3c905 "boomerang"
1921 	 * cards in order to enable the download engine.
1922 	 */
1923 	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1924 
1925 	/* Set the TX start threshold for best performance. */
1926 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1927 
1928 	/*
1929 	 * If this is a 3c905B, also set the tx reclaim threshold.
1930 	 * This helps cut down on the number of tx reclaim errors
1931 	 * that could happen on a busy network. The chip multiplies
1932 	 * the register value by 16 to obtain the actual threshold
1933 	 * in bytes, so we divide by 16 when setting the value here.
1934 	 * The existing threshold value can be examined by reading
1935 	 * the register at offset 9 in window 5.
1936 	 */
1937 	if (sc->xl_type == XL_TYPE_905B) {
1938 		CSR_WRITE_2(sc, XL_COMMAND,
1939 		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1940 	}
1941 
1942 	/* Program promiscuous mode and multicast filters. */
1943 	xl_iff(sc);
1944 
1945 	/*
1946 	 * Load the address of the RX list. We have to
1947 	 * stall the upload engine before we can manipulate
1948 	 * the uplist pointer register, then unstall it when
1949 	 * we're finished. We also have to wait for the
1950 	 * stall command to complete before proceeding.
1951 	 * Note that we have to do this after any RX resets
1952 	 * have completed since the uplist register is cleared
1953 	 * by a reset.
1954 	 */
1955 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1956 	xl_wait(sc);
1957 	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
1958 	    offsetof(struct xl_list_data, xl_rx_list[0]));
1959 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1960 	xl_wait(sc);
1961 
1962 	if (sc->xl_type == XL_TYPE_905B) {
1963 		/* Set polling interval */
1964 		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1965 		/* Load the address of the TX list */
1966 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1967 		xl_wait(sc);
1968 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1969 		    sc->sc_listmap->dm_segs[0].ds_addr +
1970 		    offsetof(struct xl_list_data, xl_tx_list[0]));
1971 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1972 		xl_wait(sc);
1973 	}
1974 
1975 	/*
1976 	 * If the coax transceiver is on, make sure to enable
1977 	 * the DC-DC converter.
1978  	 */
1979 	XL_SEL_WIN(3);
1980 	if (sc->xl_xcvr == XL_XCVR_COAX)
1981 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1982 	else
1983 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1984 
1985 	/*
1986 	 * increase packet size to allow reception of 802.1q or ISL packets.
1987 	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
1988 	 * control register. For 3c90xB/C chips, use the RX packet size
1989 	 * register.
1990 	 */
1991 
1992 	if (sc->xl_type == XL_TYPE_905B)
1993 		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
1994 	else {
1995 		u_int8_t macctl;
1996 		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
1997 		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
1998 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
1999 	}
2000 
2001 	/* Clear out the stats counters. */
2002 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2003 	sc->xl_stats_no_timeout = 1;
2004 	xl_stats_update(sc);
2005 	sc->xl_stats_no_timeout = 0;
2006 	XL_SEL_WIN(4);
2007 	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2008 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2009 
2010 	/*
2011 	 * Enable interrupts.
2012 	 */
2013 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2014 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2015 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2016 
2017 	if (sc->intr_ack)
2018 		(*sc->intr_ack)(sc);
2019 
2020 	/* Set the RX early threshold */
2021 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2022 	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2023 
2024 	/* Enable receiver and transmitter. */
2025 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2026 	xl_wait(sc);
2027 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2028 	xl_wait(sc);
2029 
2030 	/* Restore state of BMCR */
2031 	if (mii != NULL)
2032 		mii_mediachg(mii);
2033 
2034 	/* Select window 7 for normal operations. */
2035 	XL_SEL_WIN(7);
2036 
2037 	ifp->if_flags |= IFF_RUNNING;
2038 	ifq_clr_oactive(&ifp->if_snd);
2039 
2040 	splx(s);
2041 
2042 	timeout_add_sec(&sc->xl_stsup_tmo, 1);
2043 }
2044 
2045 /*
2046  * Set media options.
2047  */
2048 int
2049 xl_ifmedia_upd(struct ifnet *ifp)
2050 {
2051 	struct xl_softc		*sc;
2052 	struct ifmedia		*ifm = NULL;
2053 	struct mii_data		*mii = NULL;
2054 
2055 	sc = ifp->if_softc;
2056 
2057 	if (sc->xl_hasmii)
2058 		mii = &sc->sc_mii;
2059 	if (mii == NULL)
2060 		ifm = &sc->ifmedia;
2061 	else
2062 		ifm = &mii->mii_media;
2063 
2064 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2065 	case IFM_100_FX:
2066 	case IFM_10_FL:
2067 	case IFM_10_2:
2068 	case IFM_10_5:
2069 		xl_setmode(sc, ifm->ifm_media);
2070 		return (0);
2071 		break;
2072 	default:
2073 		break;
2074 	}
2075 
2076 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2077 		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2078 		xl_init(sc);
2079 	} else {
2080 		xl_setmode(sc, ifm->ifm_media);
2081 	}
2082 
2083 	return (0);
2084 }
2085 
2086 /*
2087  * Report current media status.
2088  */
2089 void
2090 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2091 {
2092 	struct xl_softc		*sc;
2093 	u_int32_t		icfg;
2094 	u_int16_t		status = 0;
2095 	struct mii_data		*mii = NULL;
2096 
2097 	sc = ifp->if_softc;
2098 	if (sc->xl_hasmii != 0)
2099 		mii = &sc->sc_mii;
2100 
2101 	XL_SEL_WIN(4);
2102 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2103 
2104 	XL_SEL_WIN(3);
2105 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2106 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2107 
2108 	ifmr->ifm_active = IFM_ETHER;
2109 	ifmr->ifm_status = IFM_AVALID;
2110 
2111 	if ((status & XL_MEDIASTAT_CARRIER) == 0)
2112 		ifmr->ifm_status |= IFM_ACTIVE;
2113 
2114 	switch(icfg) {
2115 	case XL_XCVR_10BT:
2116 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2117 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2118 			ifmr->ifm_active |= IFM_FDX;
2119 		else
2120 			ifmr->ifm_active |= IFM_HDX;
2121 		break;
2122 	case XL_XCVR_AUI:
2123 		if (sc->xl_type == XL_TYPE_905B &&
2124 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2125 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2126 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2127 				ifmr->ifm_active |= IFM_FDX;
2128 			else
2129 				ifmr->ifm_active |= IFM_HDX;
2130 		} else
2131 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2132 		break;
2133 	case XL_XCVR_COAX:
2134 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2135 		break;
2136 	/*
2137 	 * XXX MII and BTX/AUTO should be separate cases.
2138 	 */
2139 
2140 	case XL_XCVR_100BTX:
2141 	case XL_XCVR_AUTO:
2142 	case XL_XCVR_MII:
2143 		if (mii != NULL) {
2144 			mii_pollstat(mii);
2145 			ifmr->ifm_active = mii->mii_media_active;
2146 			ifmr->ifm_status = mii->mii_media_status;
2147 		}
2148 		break;
2149 	case XL_XCVR_100BFX:
2150 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2151 		break;
2152 	default:
2153 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2154 		break;
2155 	}
2156 }
2157 
2158 int
2159 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2160 {
2161 	struct xl_softc *sc = ifp->if_softc;
2162 	struct ifreq *ifr = (struct ifreq *)data;
2163 	int s, error = 0;
2164 	struct mii_data *mii = NULL;
2165 
2166 	s = splnet();
2167 
2168 	switch(command) {
2169 	case SIOCSIFADDR:
2170 		ifp->if_flags |= IFF_UP;
2171 		if (!(ifp->if_flags & IFF_RUNNING))
2172 			xl_init(sc);
2173 		break;
2174 
2175 	case SIOCSIFFLAGS:
2176 		if (ifp->if_flags & IFF_UP) {
2177 			if (ifp->if_flags & IFF_RUNNING)
2178 				error = ENETRESET;
2179 			else
2180 				xl_init(sc);
2181 		} else {
2182 			if (ifp->if_flags & IFF_RUNNING)
2183 				xl_stop(sc);
2184 		}
2185 		break;
2186 
2187 	case SIOCGIFMEDIA:
2188 	case SIOCSIFMEDIA:
2189 		if (sc->xl_hasmii != 0)
2190 			mii = &sc->sc_mii;
2191 		if (mii == NULL)
2192 			error = ifmedia_ioctl(ifp, ifr,
2193 			    &sc->ifmedia, command);
2194 		else
2195 			error = ifmedia_ioctl(ifp, ifr,
2196 			    &mii->mii_media, command);
2197 		break;
2198 
2199 	case SIOCGIFRXR:
2200 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2201 		    NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring);
2202 		break;
2203 
2204 	default:
2205 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2206 	}
2207 
2208 	if (error == ENETRESET) {
2209 		if (ifp->if_flags & IFF_RUNNING)
2210 			xl_iff(sc);
2211 		error = 0;
2212 	}
2213 
2214 	splx(s);
2215 	return (error);
2216 }
2217 
2218 void
2219 xl_watchdog(struct ifnet *ifp)
2220 {
2221 	struct xl_softc		*sc;
2222 	u_int16_t		status = 0;
2223 
2224 	sc = ifp->if_softc;
2225 
2226 	ifp->if_oerrors++;
2227 	XL_SEL_WIN(4);
2228 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2229 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2230 
2231 	if (status & XL_MEDIASTAT_CARRIER)
2232 		printf("%s: no carrier - transceiver cable problem?\n",
2233 								sc->sc_dev.dv_xname);
2234 	xl_txeoc(sc);
2235 	xl_txeof(sc);
2236 	xl_rxeof(sc);
2237 	xl_init(sc);
2238 
2239 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2240 		(*ifp->if_start)(ifp);
2241 }
2242 
2243 void
2244 xl_freetxrx(struct xl_softc *sc)
2245 {
2246 	bus_dmamap_t	map;
2247 	int		i;
2248 
2249 	/*
2250 	 * Free data in the RX lists.
2251 	 */
2252 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2253 		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2254 			map = sc->xl_cdata.xl_rx_chain[i].map;
2255 
2256 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2257 			    BUS_DMASYNC_POSTREAD);
2258 			bus_dmamap_unload(sc->sc_dmat, map);
2259 		}
2260 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2261 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2262 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2263 		}
2264 	}
2265 	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2266 	/*
2267 	 * Free the TX list buffers.
2268 	 */
2269 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2270 		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2271 			map = sc->xl_cdata.xl_tx_chain[i].map;
2272 
2273 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2274 			    BUS_DMASYNC_POSTWRITE);
2275 			bus_dmamap_unload(sc->sc_dmat, map);
2276 		}
2277 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2278 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2279 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2280 		}
2281 	}
2282 	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2283 }
2284 
2285 /*
2286  * Stop the adapter and free any mbufs allocated to the
2287  * RX and TX lists.
2288  */
2289 void
2290 xl_stop(struct xl_softc *sc)
2291 {
2292 	struct ifnet *ifp;
2293 
2294 	/* Stop the stats updater. */
2295 	timeout_del(&sc->xl_stsup_tmo);
2296 
2297 	ifp = &sc->sc_arpcom.ac_if;
2298 
2299 	ifp->if_flags &= ~IFF_RUNNING;
2300 	ifq_clr_oactive(&ifp->if_snd);
2301 	ifp->if_timer = 0;
2302 
2303 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2304 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2305 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2306 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2307 	xl_wait(sc);
2308 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2309 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2310 	DELAY(800);
2311 
2312 #ifdef foo
2313 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2314 	xl_wait(sc);
2315 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2316 	xl_wait(sc);
2317 #endif
2318 
2319 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2320 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2321 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2322 
2323 	if (sc->intr_ack)
2324 		(*sc->intr_ack)(sc);
2325 
2326 	xl_freetxrx(sc);
2327 }
2328 
2329 #ifndef SMALL_KERNEL
2330 void
2331 xl_wol_power(struct xl_softc *sc)
2332 {
2333 	/* Re-enable RX and call upper layer WOL power routine
2334 	 * if WOL is enabled. */
2335 	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
2336 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2337 		sc->wol_power(sc->wol_power_arg);
2338 	}
2339 }
2340 #endif
2341 
2342 void
2343 xl_attach(struct xl_softc *sc)
2344 {
2345 	u_int8_t enaddr[ETHER_ADDR_LEN];
2346 	u_int16_t		xcvr[2];
2347 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2348 	int i;
2349 	uint64_t media = IFM_ETHER|IFM_100_TX|IFM_FDX;
2350 	struct ifmedia *ifm;
2351 
2352 	i = splnet();
2353 	xl_reset(sc);
2354 	splx(i);
2355 
2356 	/*
2357 	 * Get station address from the EEPROM.
2358 	 */
2359 	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
2360 		printf("\n%s: failed to read station address\n",
2361 		    sc->sc_dev.dv_xname);
2362 		return;
2363 	}
2364 	memcpy(&sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
2365 
2366 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
2367 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
2368 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2369 		printf(": can't alloc list mem\n");
2370 		return;
2371 	}
2372 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
2373 	    sizeof(struct xl_list_data), &sc->sc_listkva,
2374 	    BUS_DMA_NOWAIT) != 0) {
2375 		printf(": can't map list mem\n");
2376 		return;
2377 	}
2378 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
2379 	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
2380 	    &sc->sc_listmap) != 0) {
2381 		printf(": can't alloc list map\n");
2382 		return;
2383 	}
2384 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
2385 	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
2386 		printf(": can't load list map\n");
2387 		return;
2388 	}
2389 	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;
2390 
2391 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2392 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
2393 		    0, BUS_DMA_NOWAIT,
2394 		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
2395 			printf(": can't create rx map\n");
2396 			return;
2397 		}
2398 	}
2399 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2400 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
2401 		printf(": can't create rx spare map\n");
2402 		return;
2403 	}
2404 
2405 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2406 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2407 		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
2408 		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
2409 			printf(": can't create tx map\n");
2410 			return;
2411 		}
2412 	}
2413 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
2414 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
2415 		printf(": can't create tx spare map\n");
2416 		return;
2417 	}
2418 
2419 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
2420 
2421 	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
2422 		u_int16_t n;
2423 
2424 		XL_SEL_WIN(2);
2425 		n = CSR_READ_2(sc, 12);
2426 
2427 		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
2428 			n |= 0x0010;
2429 
2430 		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
2431 			n |= 0x4000;
2432 
2433 		CSR_WRITE_2(sc, 12, n);
2434 	}
2435 
2436 	/*
2437 	 * Figure out the card type. 3c905B adapters have the
2438 	 * 'supportsNoTxLength' bit set in the capabilities
2439 	 * word in the EEPROM.
2440 	 * Note: my 3c575C cardbus card lies. It returns a value
2441 	 * of 0x1578 for its capabilities word, which is somewhat
2442 	 * nonsensical. Another way to distinguish a 3c90x chip
2443 	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
2444 	 * bit. This will only be set for 3c90x boomerage chips.
2445 	 */
2446 	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
2447 	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
2448 	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
2449 		sc->xl_type = XL_TYPE_905B;
2450 	else
2451 		sc->xl_type = XL_TYPE_90X;
2452 
2453 	/* Set the TX start threshold for best performance. */
2454 	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2455 
2456 	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);
2457 
2458 	ifp->if_softc = sc;
2459 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2460 	ifp->if_ioctl = xl_ioctl;
2461 	if (sc->xl_type == XL_TYPE_905B)
2462 		ifp->if_start = xl_start_90xB;
2463 	else
2464 		ifp->if_start = xl_start;
2465 	ifp->if_watchdog = xl_watchdog;
2466 	ifp->if_baudrate = 10000000;
2467 	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
2468 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
2469 
2470 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2471 
2472 #ifndef XL905B_TXCSUM_BROKEN
2473 	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2474 				IFCAP_CSUM_UDPv4;
2475 #endif
2476 
2477 	XL_SEL_WIN(3);
2478 	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
2479 
2480 	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
2481 	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
2482 	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
2483 	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
2484 
2485 	xl_mediacheck(sc);
2486 
2487 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2488 	    || sc->xl_media & XL_MEDIAOPT_BT4) {
2489 		ifmedia_init(&sc->sc_mii.mii_media, 0,
2490 		    xl_ifmedia_upd, xl_ifmedia_sts);
2491 		sc->xl_hasmii = 1;
2492 		sc->sc_mii.mii_ifp = ifp;
2493 		sc->sc_mii.mii_readreg = xl_miibus_readreg;
2494 		sc->sc_mii.mii_writereg = xl_miibus_writereg;
2495 		sc->sc_mii.mii_statchg = xl_miibus_statchg;
2496 		xl_setcfg(sc);
2497 		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
2498 		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
2499 
2500 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2501 			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
2502 			    0, NULL);
2503 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2504 		}
2505 		else {
2506 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2507 		}
2508 		ifm = &sc->sc_mii.mii_media;
2509 	}
2510 	else {
2511 		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
2512 		sc->xl_hasmii = 0;
2513 		ifm = &sc->ifmedia;
2514 	}
2515 
2516 	/*
2517 	 * Sanity check. If the user has selected "auto" and this isn't
2518 	 * a 10/100 card of some kind, we need to force the transceiver
2519 	 * type to something sane.
2520 	 */
2521 	if (sc->xl_xcvr == XL_XCVR_AUTO)
2522 		xl_choose_xcvr(sc, 0);
2523 
2524 	if (sc->xl_media & XL_MEDIAOPT_BT) {
2525 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
2526 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2527 		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2528 			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2529 	}
2530 
2531 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
2532 		/*
2533 		 * Check for a 10baseFL board in disguise.
2534 		 */
2535 		if (sc->xl_type == XL_TYPE_905B &&
2536 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2537 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
2538 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
2539 			    0, NULL);
2540 			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2541 				ifmedia_add(ifm,
2542 				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
2543 		} else {
2544 			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
2545 		}
2546 	}
2547 
2548 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
2549 		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
2550 	}
2551 
2552 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
2553 		ifp->if_baudrate = 100000000;
2554 		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
2555 	}
2556 
2557 	/* Choose a default media. */
2558 	switch(sc->xl_xcvr) {
2559 	case XL_XCVR_10BT:
2560 		media = IFM_ETHER|IFM_10_T;
2561 		xl_setmode(sc, media);
2562 		break;
2563 	case XL_XCVR_AUI:
2564 		if (sc->xl_type == XL_TYPE_905B &&
2565 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2566 			media = IFM_ETHER|IFM_10_FL;
2567 			xl_setmode(sc, media);
2568 		} else {
2569 			media = IFM_ETHER|IFM_10_5;
2570 			xl_setmode(sc, media);
2571 		}
2572 		break;
2573 	case XL_XCVR_COAX:
2574 		media = IFM_ETHER|IFM_10_2;
2575 		xl_setmode(sc, media);
2576 		break;
2577 	case XL_XCVR_AUTO:
2578 	case XL_XCVR_100BTX:
2579 	case XL_XCVR_MII:
2580 		/* Chosen by miibus */
2581 		break;
2582 	case XL_XCVR_100BFX:
2583 		media = IFM_ETHER|IFM_100_FX;
2584 		xl_setmode(sc, media);
2585 		break;
2586 	default:
2587 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
2588 							sc->xl_xcvr);
2589 		/*
2590 		 * This will probably be wrong, but it prevents
2591 		 * the ifmedia code from panicking.
2592 		 */
2593 		media = IFM_ETHER | IFM_10_T;
2594 		break;
2595 	}
2596 
2597 	if (sc->xl_hasmii == 0)
2598 		ifmedia_set(&sc->ifmedia, media);
2599 
2600 	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
2601 		XL_SEL_WIN(0);
2602 		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
2603 	}
2604 
2605 #ifndef SMALL_KERNEL
2606 	/* Check availability of WOL. */
2607 	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
2608 		ifp->if_capabilities |= IFCAP_WOL;
2609 		ifp->if_wol = xl_wol;
2610 		xl_wol(ifp, 0);
2611 	}
2612 #endif
2613 
2614 	/*
2615 	 * Call MI attach routines.
2616 	 */
2617 	if_attach(ifp);
2618 	ether_ifattach(ifp);
2619 }
2620 
2621 int
2622 xl_detach(struct xl_softc *sc)
2623 {
2624 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2625 	extern void xl_freetxrx(struct xl_softc *);
2626 
2627 	/* Unhook our tick handler. */
2628 	timeout_del(&sc->xl_stsup_tmo);
2629 
2630 	xl_freetxrx(sc);
2631 
2632 	/* Detach all PHYs */
2633 	if (sc->xl_hasmii)
2634 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2635 
2636 	/* Delete all remaining media. */
2637 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2638 
2639 	ether_ifdetach(ifp);
2640 	if_detach(ifp);
2641 
2642 	return (0);
2643 }
2644 
2645 #ifndef SMALL_KERNEL
2646 int
2647 xl_wol(struct ifnet *ifp, int enable)
2648 {
2649 	struct xl_softc		*sc = ifp->if_softc;
2650 
2651 	XL_SEL_WIN(7);
2652 	if (enable) {
2653 		if (!(ifp->if_flags & IFF_RUNNING))
2654 			xl_init(sc);
2655 		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2656 		sc->xl_flags |= XL_FLAG_WOL;
2657 	} else {
2658 		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2659 		sc->xl_flags &= ~XL_FLAG_WOL;
2660 	}
2661 	return (0);
2662 }
2663 #endif
2664 
2665 struct cfdriver xl_cd = {
2666 	0, "xl", DV_IFNET
2667 };
2668