xref: /dflybsd-src/sys/dev/netif/tl/if_tl.c (revision 15a56cb3807bfc9539b6ac36cb59d42bd9af9659)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_tl.c,v 1.51.2.5 2001/12/16 15:46:08 luigi Exp $
33  * $DragonFly: src/sys/dev/netif/tl/if_tl.c,v 1.33 2005/11/28 17:13:44 dillon Exp $
34  */
35 
36 /*
37  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39  * the National Semiconductor DP83840A physical interface and the
40  * Microchip Technology 24Cxx series serial EEPROM.
41  *
42  * Written using the following four documents:
43  *
44  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45  * National Semiconductor DP83840A data sheet (www.national.com)
46  * Microchip Technology 24C02C data sheet (www.microchip.com)
47  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48  *
49  * Written by Bill Paul <wpaul@ctr.columbia.edu>
50  * Electrical Engineering Department
51  * Columbia University, New York City
52  */
53 
54 /*
55  * Some notes about the ThunderLAN:
56  *
57  * The ThunderLAN controller is a single chip containing PCI controller
 58  * logic, approximately 3K of on-board SRAM, a LAN controller, and a media
59  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
60  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
61  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
62  * to act as a complete ethernet interface.
63  *
64  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
65  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
66  * in full or half duplex. Some of the Compaq Deskpro machines use a
67  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
68  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
69  * concert with the ThunderLAN's internal PHY to provide full 10/100
70  * support. This is cheaper than using a standalone external PHY for both
71  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
72  * A serial EEPROM is also attached to the ThunderLAN chip to provide
 73  * power-up default register settings and to store the adapter's
74  * station address. Although not supported by this driver, the ThunderLAN
75  * chip can also be connected to token ring PHYs.
76  *
77  * The ThunderLAN has a set of registers which can be used to issue
78  * commands, acknowledge interrupts, and to manipulate other internal
79  * registers on its DIO bus. The primary registers can be accessed
80  * using either programmed I/O (inb/outb) or via PCI memory mapping,
81  * depending on how the card is configured during the PCI probing
82  * phase. It is even possible to have both PIO and memory mapped
83  * access turned on at the same time.
84  *
85  * Frame reception and transmission with the ThunderLAN chip is done
86  * using frame 'lists.' A list structure looks more or less like this:
87  *
88  * struct tl_frag {
89  *	u_int32_t		fragment_address;
90  *	u_int32_t		fragment_size;
91  * };
92  * struct tl_list {
93  *	u_int32_t		forward_pointer;
94  *	u_int16_t		cstat;
95  *	u_int16_t		frame_size;
96  *	struct tl_frag		fragments[10];
97  * };
98  *
99  * The forward pointer in the list header can be either a 0 or the address
100  * of another list, which allows several lists to be linked together. Each
101  * list contains up to 10 fragment descriptors. This means the chip allows
102  * ethernet frames to be broken up into up to 10 chunks for transfer to
103  * and from the SRAM. Note that the forward pointer and fragment buffer
104  * addresses are physical memory addresses, not virtual. Note also that
105  * a single ethernet frame can not span lists: if the host wants to
106  * transmit a frame and the frame data is split up over more than 10
107  * buffers, the frame has to collapsed before it can be transmitted.
108  *
109  * To receive frames, the driver sets up a number of lists and populates
110  * the fragment descriptors, then it sends an RX GO command to the chip.
111  * When a frame is received, the chip will DMA it into the memory regions
112  * specified by the fragment descriptors and then trigger an RX 'end of
113  * frame interrupt' when done. The driver may choose to use only one
 114  * fragment per list; this may result in slightly less efficient use
 115  * of memory in exchange for improved performance.
116  *
117  * To transmit frames, the driver again sets up lists and fragment
118  * descriptors, only this time the buffers contain frame data that
119  * is to be DMA'ed into the chip instead of out of it. Once the chip
120  * has transfered the data into its on-board SRAM, it will trigger a
121  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
122  * interrupt when it reaches the end of the list.
123  */
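
/*
 * For illustration only (hypothetical names, not driver code), two such
 * lists could be chained and terminated as follows, with 'buf' and 'buflen'
 * standing in for a receive buffer and its size:
 *
 *	struct tl_list	lists[2];
 *
 *	lists[0].fragments[0].fragment_address = vtophys(buf);
 *	lists[0].fragments[0].fragment_size = buflen;
 *	lists[0].forward_pointer = vtophys(&lists[1]);
 *	lists[1].forward_pointer = 0;
 *
 * The chip walks the chain through the physical forward pointers and DMAs
 * frame data to or from the buffers named by the fragment descriptors.
 */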
124 
125 /*
126  * Some notes about this driver:
127  *
128  * The ThunderLAN chip provides a couple of different ways to organize
129  * reception, transmission and interrupt handling. The simplest approach
130  * is to use one list each for transmission and reception. In this mode,
131  * the ThunderLAN will generate two interrupts for every received frame
132  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
133  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
134  * performance to have to handle so many interrupts.
135  *
136  * Initially I wanted to create a circular list of receive buffers so
137  * that the ThunderLAN chip would think there was an infinitely long
138  * receive channel and never deliver an RXEOC interrupt. However this
139  * doesn't work correctly under heavy load: while the manual says the
140  * chip will trigger an RXEOF interrupt each time a frame is copied into
141  * memory, you can't count on the chip waiting around for you to acknowledge
142  * the interrupt before it starts trying to DMA the next frame. The result
143  * is that the chip might traverse the entire circular list and then wrap
144  * around before you have a chance to do anything about it. Consequently,
145  * the receive list is terminated (with a 0 in the forward pointer in the
 146  * last element). Each time an RXEOF interrupt arrives, the used list
 147  * element is shifted to the end of the chain. This gives the appearance of an
148  * infinitely large RX chain so long as the driver doesn't fall behind
149  * the chip and allow all of the lists to be filled up.
150  *
151  * If all the lists are filled, the adapter will deliver an RX 'end of
152  * channel' interrupt when it hits the 0 forward pointer at the end of
153  * the chain. The RXEOC handler then cleans out the RX chain and resets
154  * the list head pointer in the ch_parm register and restarts the receiver.
155  *
156  * For frame transmission, it is possible to program the ThunderLAN's
157  * transmit interrupt threshold so that the chip can acknowledge multiple
158  * lists with only a single TX EOF interrupt. This allows the driver to
159  * queue several frames in one shot, and only have to handle a total
160  * two interrupts (one TX EOF and one TX EOC) no matter how many frames
161  * are transmitted. Frame transmission is done directly out of the
162  * mbufs passed to the tl_start() routine via the interface send queue.
163  * The driver simply sets up the fragment descriptors in the transmit
164  * lists to point to the mbuf data regions and sends a TX GO command.
165  *
166  * Note that since the RX and TX lists themselves are always used
 167  * only by the driver, they are malloc()ed once at driver initialization
168  * time and never free()ed.
169  *
170  * Also, in order to remain as platform independent as possible, this
171  * driver uses memory mapped register access to manipulate the card
172  * as opposed to programmed I/O. This avoids the use of the inb/outb
173  * (and related) instructions which are specific to the i386 platform.
174  *
175  * Using these techniques, this driver achieves very high performance
 176  * by minimizing the number of interrupts generated during large
177  * transfers and by completely avoiding buffer copies. Frame transfer
178  * to and from the ThunderLAN chip is performed entirely by the chip
179  * itself thereby reducing the load on the host CPU.
180  */
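
/*
 * A simplified sketch of the RXEOF 'recycle to tail' step described above
 * (illustration only; mbuf refill and status handling are omitted). The
 * names follow the chain structures used further down in this file:
 *
 *	cur = cd->tl_rx_head;				(completed element)
 *	cd->tl_rx_head = cur->tl_next;			(advance the head)
 *	cur->tl_ptr->tlist_fptr = 0;			(new terminator)
 *	cd->tl_rx_tail->tl_ptr->tlist_fptr = vtophys(cur->tl_ptr);
 *	cd->tl_rx_tail->tl_next = cur;			(re-link after old tail)
 *	cd->tl_rx_tail = cur;				(cur is the new tail)
 */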
181 
182 #include <sys/param.h>
183 #include <sys/systm.h>
184 #include <sys/sockio.h>
185 #include <sys/mbuf.h>
186 #include <sys/malloc.h>
187 #include <sys/kernel.h>
188 #include <sys/socket.h>
189 #include <sys/serialize.h>
190 #include <sys/thread2.h>
191 
192 #include <net/if.h>
193 #include <net/ifq_var.h>
194 #include <net/if_arp.h>
195 #include <net/ethernet.h>
196 #include <net/if_dl.h>
197 #include <net/if_media.h>
198 
199 #include <net/bpf.h>
200 
201 #include <vm/vm.h>              /* for vtophys */
202 #include <vm/pmap.h>            /* for vtophys */
203 #include <machine/bus_memio.h>
204 #include <machine/bus_pio.h>
205 #include <machine/bus.h>
206 #include <machine/resource.h>
207 #include <sys/bus.h>
208 #include <sys/rman.h>
209 
210 #include "../mii_layer/mii.h"
211 #include "../mii_layer/miivar.h"
212 
213 #include <bus/pci/pcireg.h>
214 #include <bus/pci/pcivar.h>
215 
216 /*
217  * Default to using PIO register access mode to pacify certain
218  * laptop docking stations with built-in ThunderLAN chips that
219  * don't seem to handle memory mapped mode properly.
220  */
221 #define TL_USEIOSPACE
222 
223 #include "if_tlreg.h"
224 
225 /* "controller miibus0" required.  See GENERIC if you get errors here. */
226 #include "miibus_if.h"
227 
228 /*
229  * Various supported device vendors/types and their names.
230  */
231 
232 static struct tl_type tl_devs[] = {
233 	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
234 		"Texas Instruments ThunderLAN" },
235 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
236 		"Compaq Netelligent 10" },
237 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
238 		"Compaq Netelligent 10/100" },
239 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
240 		"Compaq Netelligent 10/100 Proliant" },
241 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
242 		"Compaq Netelligent 10/100 Dual Port" },
243 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
244 		"Compaq NetFlex-3/P Integrated" },
245 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
246 		"Compaq NetFlex-3/P" },
247 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
248 		"Compaq NetFlex 3/P w/ BNC" },
249 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
250 		"Compaq Netelligent 10/100 TX Embedded UTP" },
251 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
252 		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
253 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
254 		"Compaq Netelligent 10/100 TX UTP" },
255 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
256 		"Olicom OC-2183/2185" },
257 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
258 		"Olicom OC-2325" },
259 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
260 		"Olicom OC-2326 10/100 TX UTP" },
261 	{ 0, 0, NULL }
262 };
263 
264 static int tl_probe		(device_t);
265 static int tl_attach		(device_t);
266 static int tl_detach		(device_t);
267 static int tl_intvec_rxeoc	(void *, u_int32_t);
268 static int tl_intvec_txeoc	(void *, u_int32_t);
269 static int tl_intvec_txeof	(void *, u_int32_t);
270 static int tl_intvec_rxeof	(void *, u_int32_t);
271 static int tl_intvec_adchk	(void *, u_int32_t);
272 static int tl_intvec_netsts	(void *, u_int32_t);
273 
274 static int tl_newbuf		(struct tl_softc *,
275 					struct tl_chain_onefrag *);
276 static void tl_stats_update	(void *);
277 static void tl_stats_update_serialized(void *);
278 static int tl_encap		(struct tl_softc *, struct tl_chain *,
279 						struct mbuf *);
280 
281 static void tl_intr		(void *);
282 static void tl_start		(struct ifnet *);
283 static int tl_ioctl		(struct ifnet *, u_long, caddr_t,
284 						struct ucred *);
285 static void tl_init		(void *);
286 static void tl_stop		(struct tl_softc *);
287 static void tl_watchdog		(struct ifnet *);
288 static void tl_shutdown		(device_t);
289 static int tl_ifmedia_upd	(struct ifnet *);
290 static void tl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
291 
292 static u_int8_t tl_eeprom_putbyte	(struct tl_softc *, int);
293 static u_int8_t	tl_eeprom_getbyte	(struct tl_softc *,
294 						int, u_int8_t *);
295 static int tl_read_eeprom	(struct tl_softc *, caddr_t, int, int);
296 
297 static void tl_mii_sync		(struct tl_softc *);
298 static void tl_mii_send		(struct tl_softc *, u_int32_t, int);
299 static int tl_mii_readreg	(struct tl_softc *, struct tl_mii_frame *);
300 static int tl_mii_writereg	(struct tl_softc *, struct tl_mii_frame *);
301 static int tl_miibus_readreg	(device_t, int, int);
302 static int tl_miibus_writereg	(device_t, int, int, int);
303 static void tl_miibus_statchg	(device_t);
304 
305 static void tl_setmode		(struct tl_softc *, int);
306 static int tl_calchash		(caddr_t);
307 static void tl_setmulti		(struct tl_softc *);
308 static void tl_setfilt		(struct tl_softc *, caddr_t, int);
309 static void tl_softreset	(struct tl_softc *, int);
310 static void tl_hardreset	(device_t);
311 static int tl_list_rx_init	(struct tl_softc *);
312 static int tl_list_tx_init	(struct tl_softc *);
313 
314 static u_int8_t tl_dio_read8	(struct tl_softc *, int);
315 static u_int16_t tl_dio_read16	(struct tl_softc *, int);
316 static u_int32_t tl_dio_read32	(struct tl_softc *, int);
317 static void tl_dio_write8	(struct tl_softc *, int, int);
318 static void tl_dio_write16	(struct tl_softc *, int, int);
319 static void tl_dio_write32	(struct tl_softc *, int, int);
320 static void tl_dio_setbit	(struct tl_softc *, int, int);
321 static void tl_dio_clrbit	(struct tl_softc *, int, int);
322 static void tl_dio_setbit16	(struct tl_softc *, int, int);
323 static void tl_dio_clrbit16	(struct tl_softc *, int, int);
324 
325 #ifdef TL_USEIOSPACE
326 #define TL_RES		SYS_RES_IOPORT
327 #define TL_RID		TL_PCI_LOIO
328 #else
329 #define TL_RES		SYS_RES_MEMORY
330 #define TL_RID		TL_PCI_LOMEM
331 #endif
332 
333 static device_method_t tl_methods[] = {
334 	/* Device interface */
335 	DEVMETHOD(device_probe,		tl_probe),
336 	DEVMETHOD(device_attach,	tl_attach),
337 	DEVMETHOD(device_detach,	tl_detach),
338 	DEVMETHOD(device_shutdown,	tl_shutdown),
339 
340 	/* bus interface */
341 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
342 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
343 
344 	/* MII interface */
345 	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
346 	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
347 	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
348 
349 	{ 0, 0 }
350 };
351 
352 static driver_t tl_driver = {
353 	"tl",
354 	tl_methods,
355 	sizeof(struct tl_softc)
356 };
357 
358 static devclass_t tl_devclass;
359 
360 DECLARE_DUMMY_MODULE(if_tl);
361 DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0);
362 DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
363 
364 static u_int8_t tl_dio_read8(sc, reg)
365 	struct tl_softc		*sc;
366 	int			reg;
367 {
368 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
369 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
370 }
371 
372 static u_int16_t tl_dio_read16(sc, reg)
373 	struct tl_softc		*sc;
374 	int			reg;
375 {
376 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
377 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
378 }
379 
380 static u_int32_t tl_dio_read32(sc, reg)
381 	struct tl_softc		*sc;
382 	int			reg;
383 {
384 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
385 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
386 }
387 
388 static void tl_dio_write8(sc, reg, val)
389 	struct tl_softc		*sc;
390 	int			reg;
391 	int			val;
392 {
393 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
394 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
395 	return;
396 }
397 
398 static void tl_dio_write16(sc, reg, val)
399 	struct tl_softc		*sc;
400 	int			reg;
401 	int			val;
402 {
403 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
404 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
405 	return;
406 }
407 
408 static void tl_dio_write32(sc, reg, val)
409 	struct tl_softc		*sc;
410 	int			reg;
411 	int			val;
412 {
413 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
414 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
415 	return;
416 }
417 
418 static void tl_dio_setbit(sc, reg, bit)
419 	struct tl_softc		*sc;
420 	int			reg;
421 	int			bit;
422 {
423 	u_int8_t			f;
424 
425 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
426 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
427 	f |= bit;
428 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
429 
430 	return;
431 }
432 
433 static void tl_dio_clrbit(sc, reg, bit)
434 	struct tl_softc		*sc;
435 	int			reg;
436 	int			bit;
437 {
438 	u_int8_t			f;
439 
440 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
441 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
442 	f &= ~bit;
443 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
444 
445 	return;
446 }
447 
448 static void tl_dio_setbit16(sc, reg, bit)
449 	struct tl_softc		*sc;
450 	int			reg;
451 	int			bit;
452 {
453 	u_int16_t			f;
454 
455 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
456 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
457 	f |= bit;
458 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
459 
460 	return;
461 }
462 
463 static void tl_dio_clrbit16(sc, reg, bit)
464 	struct tl_softc		*sc;
465 	int			reg;
466 	int			bit;
467 {
468 	u_int16_t			f;
469 
470 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
471 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
472 	f &= ~bit;
473 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
474 
475 	return;
476 }
477 
478 /*
479  * Send an instruction or address to the EEPROM, check for ACK.
480  */
481 static u_int8_t tl_eeprom_putbyte(sc, byte)
482 	struct tl_softc		*sc;
483 	int			byte;
484 {
485 	int		i, ack = 0;
486 
487 	/*
488 	 * Make sure we're in TX mode.
489 	 */
490 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
491 
492 	/*
 493 	 * Feed in each bit and strobe the clock.
494 	 */
495 	for (i = 0x80; i; i >>= 1) {
496 		if (byte & i) {
497 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
498 		} else {
499 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
500 		}
501 		DELAY(1);
502 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
503 		DELAY(1);
504 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
505 	}
506 
507 	/*
508 	 * Turn off TX mode.
509 	 */
510 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
511 
512 	/*
513 	 * Check for ack.
514 	 */
515 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
516 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
517 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
518 
519 	return(ack);
520 }
521 
522 /*
523  * Read a byte of data stored in the EEPROM at address 'addr.'
524  */
525 static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
526 	struct tl_softc		*sc;
527 	int			addr;
528 	u_int8_t		*dest;
529 {
530 	int		i;
531 	u_int8_t		byte = 0;
532 
533 	tl_dio_write8(sc, TL_NETSIO, 0);
534 
535 	EEPROM_START;
536 
537 	/*
538 	 * Send write control code to EEPROM.
539 	 */
540 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
541 		if_printf(&sc->arpcom.ac_if, "failed to send write command, "
542 			  "status: %x\n", tl_dio_read8(sc, TL_NETSIO));
543 		return(1);
544 	}
545 
546 	/*
547 	 * Send address of byte we want to read.
548 	 */
549 	if (tl_eeprom_putbyte(sc, addr)) {
550 		if_printf(&sc->arpcom.ac_if, "failed to send address, "
551 			  "status: %x\n", tl_dio_read8(sc, TL_NETSIO));
552 		return(1);
553 	}
554 
555 	EEPROM_STOP;
556 	EEPROM_START;
557 	/*
558 	 * Send read control code to EEPROM.
559 	 */
560 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
 561 		if_printf(&sc->arpcom.ac_if, "failed to send read command, "
562 			  "status: %x\n", tl_dio_read8(sc, TL_NETSIO));
563 		return(1);
564 	}
565 
566 	/*
567 	 * Start reading bits from EEPROM.
568 	 */
569 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
570 	for (i = 0x80; i; i >>= 1) {
571 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
572 		DELAY(1);
573 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
574 			byte |= i;
575 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
576 		DELAY(1);
577 	}
578 
579 	EEPROM_STOP;
580 
581 	/*
582 	 * No ACK generated for read, so just return byte.
583 	 */
584 
585 	*dest = byte;
586 
587 	return(0);
588 }
589 
590 /*
591  * Read a sequence of bytes from the EEPROM.
592  */
593 static int tl_read_eeprom(sc, dest, off, cnt)
594 	struct tl_softc		*sc;
595 	caddr_t			dest;
596 	int			off;
597 	int			cnt;
598 {
599 	int			err = 0, i;
600 	u_int8_t		byte = 0;
601 
602 	for (i = 0; i < cnt; i++) {
603 		err = tl_eeprom_getbyte(sc, off + i, &byte);
604 		if (err)
605 			break;
606 		*(dest + i) = byte;
607 	}
608 
609 	return(err ? 1 : 0);
610 }
611 
612 static void tl_mii_sync(sc)
613 	struct tl_softc		*sc;
614 {
615 	int		i;
616 
617 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
618 
619 	for (i = 0; i < 32; i++) {
620 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
621 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
622 	}
623 
624 	return;
625 }
626 
627 static void tl_mii_send(sc, bits, cnt)
628 	struct tl_softc		*sc;
629 	u_int32_t		bits;
630 	int			cnt;
631 {
632 	int			i;
633 
634 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
635 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
636 		if (bits & i) {
637 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
638 		} else {
639 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
640 		}
641 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
642 	}
643 }
644 
645 static int tl_mii_readreg(sc, frame)
646 	struct tl_softc		*sc;
647 	struct tl_mii_frame	*frame;
648 
649 {
650 	int			i, ack;
651 	int			minten = 0;
652 
653 	tl_mii_sync(sc);
654 
655 	/*
656 	 * Set up frame for RX.
657 	 */
658 	frame->mii_stdelim = TL_MII_STARTDELIM;
659 	frame->mii_opcode = TL_MII_READOP;
660 	frame->mii_turnaround = 0;
661 	frame->mii_data = 0;
662 
663 	/*
664 	 * Turn off MII interrupt by forcing MINTEN low.
665 	 */
666 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
667 	if (minten) {
668 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
669 	}
670 
671 	/*
672  	 * Turn on data xmit.
673 	 */
674 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
675 
676 	/*
677 	 * Send command/address info.
678 	 */
679 	tl_mii_send(sc, frame->mii_stdelim, 2);
680 	tl_mii_send(sc, frame->mii_opcode, 2);
681 	tl_mii_send(sc, frame->mii_phyaddr, 5);
682 	tl_mii_send(sc, frame->mii_regaddr, 5);
683 
684 	/*
685 	 * Turn off xmit.
686 	 */
687 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
688 
689 	/* Idle bit */
690 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
691 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
692 
693 	/* Check for ack */
694 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
695 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
696 
697 	/* Complete the cycle */
698 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
699 
700 	/*
701 	 * Now try reading data bits. If the ack failed, we still
702 	 * need to clock through 16 cycles to keep the PHYs in sync.
703 	 */
704 	if (ack) {
705 		for(i = 0; i < 16; i++) {
706 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
707 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
708 		}
709 		goto fail;
710 	}
711 
712 	for (i = 0x8000; i; i >>= 1) {
713 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
714 		if (!ack) {
715 			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
716 				frame->mii_data |= i;
717 		}
718 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
719 	}
720 
721 fail:
722 
723 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
724 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
725 
726 	/* Reenable interrupts */
727 	if (minten) {
728 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
729 	}
730 
731 	if (ack)
732 		return(1);
733 	return(0);
734 }
735 
736 static int tl_mii_writereg(sc, frame)
737 	struct tl_softc		*sc;
738 	struct tl_mii_frame	*frame;
739 
740 {
741 	int			minten;
742 
743 	tl_mii_sync(sc);
744 
745 	/*
746 	 * Set up frame for TX.
747 	 */
748 
749 	frame->mii_stdelim = TL_MII_STARTDELIM;
750 	frame->mii_opcode = TL_MII_WRITEOP;
751 	frame->mii_turnaround = TL_MII_TURNAROUND;
752 
753 	/*
754 	 * Turn off MII interrupt by forcing MINTEN low.
755 	 */
756 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
757 	if (minten) {
758 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
759 	}
760 
761 	/*
762  	 * Turn on data output.
763 	 */
764 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
765 
766 	tl_mii_send(sc, frame->mii_stdelim, 2);
767 	tl_mii_send(sc, frame->mii_opcode, 2);
768 	tl_mii_send(sc, frame->mii_phyaddr, 5);
769 	tl_mii_send(sc, frame->mii_regaddr, 5);
770 	tl_mii_send(sc, frame->mii_turnaround, 2);
771 	tl_mii_send(sc, frame->mii_data, 16);
772 
773 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
774 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
775 
776 	/*
777 	 * Turn off xmit.
778 	 */
779 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
780 
781 	/* Reenable interrupts */
782 	if (minten)
783 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
784 
785 	return(0);
786 }
787 
788 static int tl_miibus_readreg(dev, phy, reg)
789 	device_t		dev;
790 	int			phy, reg;
791 {
792 	struct tl_softc		*sc;
793 	struct tl_mii_frame	frame;
794 
795 	sc = device_get_softc(dev);
796 	bzero((char *)&frame, sizeof(frame));
797 
798 	frame.mii_phyaddr = phy;
799 	frame.mii_regaddr = reg;
800 	tl_mii_readreg(sc, &frame);
801 
802 	return(frame.mii_data);
803 }
804 
805 static int tl_miibus_writereg(dev, phy, reg, data)
806 	device_t		dev;
807 	int			phy, reg, data;
808 {
809 	struct tl_softc		*sc;
810 	struct tl_mii_frame	frame;
811 
812 	sc = device_get_softc(dev);
813 	bzero((char *)&frame, sizeof(frame));
814 
815 	frame.mii_phyaddr = phy;
816 	frame.mii_regaddr = reg;
817 	frame.mii_data = data;
818 
819 	tl_mii_writereg(sc, &frame);
820 
821 	return(0);
822 }
823 
824 static void tl_miibus_statchg(dev)
825 	device_t		dev;
826 {
827 	struct tl_softc		*sc;
828 	struct mii_data		*mii;
829 
830 	sc = device_get_softc(dev);
831 	mii = device_get_softc(sc->tl_miibus);
832 
833 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
834 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
835 	} else {
836 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
837 	}
838 
839 	return;
840 }
841 
842 /*
843  * Set modes for bitrate devices.
844  */
845 static void tl_setmode(sc, media)
846 	struct tl_softc		*sc;
847 	int			media;
848 {
849 	if (IFM_SUBTYPE(media) == IFM_10_5)
850 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
851 	if (IFM_SUBTYPE(media) == IFM_10_T) {
852 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
853 		if ((media & IFM_GMASK) == IFM_FDX) {
854 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
855 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
856 		} else {
857 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
858 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
859 		}
860 	}
861 
862 	return;
863 }
864 
865 /*
866  * Calculate the hash of a MAC address for programming the multicast hash
867  * table.  This hash is simply the address split into 6-bit chunks
868  * XOR'd, e.g.
869  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
870  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
871  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
872  * the folded 24-bit value is split into 6-bit portions and XOR'd.
873  */
874 static int tl_calchash(addr)
875 	caddr_t			addr;
876 {
877 	int			t;
878 
879 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
880 		(addr[2] ^ addr[5]);
881 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
882 }
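
/*
 * Worked example (illustration only): for the all-hosts group address
 * 01:00:5e:00:00:01 (IP 224.0.0.1), the fold gives
 * t = (0x01 ^ 0x00) << 16 | (0x00 ^ 0x00) << 8 | (0x5e ^ 0x01) = 0x01005f.
 * Its 6-bit chunks are 0x00, 0x10, 0x01 and 0x1f, which XOR to 0x0e, so
 * hashing this address would set bit 14 in the first hash register (TL_HASH1).
 */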
883 
884 /*
885  * The ThunderLAN has a perfect MAC address filter in addition to
886  * the multicast hash filter. The perfect filter can be programmed
887  * with up to four MAC addresses. The first one is always used to
888  * hold the station address, which leaves us free to use the other
889  * three for multicast addresses.
890  */
891 static void tl_setfilt(sc, addr, slot)
892 	struct tl_softc		*sc;
893 	caddr_t			addr;
894 	int			slot;
895 {
896 	int			i;
897 	u_int16_t		regaddr;
898 
899 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
900 
901 	for (i = 0; i < ETHER_ADDR_LEN; i++)
902 		tl_dio_write8(sc, regaddr + i, *(addr + i));
903 
904 	return;
905 }
906 
907 /*
908  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
909  * linked list. This is fine, except addresses are added from the head
910  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
911  * group to always be in the perfect filter, but as more groups are added,
912  * the 224.0.0.1 entry (which is always added first) gets pushed down
913  * the list and ends up at the tail. So after 3 or 4 multicast groups
914  * are added, the all-hosts entry gets pushed out of the perfect filter
915  * and into the hash table.
916  *
917  * Because the multicast list is a doubly-linked list as opposed to a
918  * circular queue, we don't have the ability to just grab the tail of
919  * the list and traverse it backwards. Instead, we have to traverse
920  * the list once to find the tail, then traverse it again backwards to
921  * update the multicast filter.
922  */
923 static void tl_setmulti(sc)
924 	struct tl_softc		*sc;
925 {
926 	struct ifnet		*ifp;
927 	u_int32_t		hashes[2] = { 0, 0 };
928 	int			h, i;
929 	struct ifmultiaddr	*ifma;
 930 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
931 	ifp = &sc->arpcom.ac_if;
932 
933 	/* First, zot all the existing filters. */
934 	for (i = 1; i < 4; i++)
935 		tl_setfilt(sc, (caddr_t)&dummy, i);
936 	tl_dio_write32(sc, TL_HASH1, 0);
937 	tl_dio_write32(sc, TL_HASH2, 0);
938 
939 	/* Now program new ones. */
940 	if (ifp->if_flags & IFF_ALLMULTI) {
941 		hashes[0] = 0xFFFFFFFF;
942 		hashes[1] = 0xFFFFFFFF;
943 	} else {
944 		i = 1;
945 		/* First find the tail of the list. */
946 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
947 					ifma = ifma->ifma_link.le_next) {
948 			if (ifma->ifma_link.le_next == NULL)
949 				break;
950 		}
951 		/* Now traverse the list backwards. */
952 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
953 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
954 			if (ifma->ifma_addr->sa_family != AF_LINK)
955 				continue;
956 			/*
957 			 * Program the first three multicast groups
958 			 * into the perfect filter. For all others,
959 			 * use the hash table.
960 			 */
961 			if (i < 4) {
962 				tl_setfilt(sc,
963 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
964 				i++;
965 				continue;
966 			}
967 
968 			h = tl_calchash(
969 				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
970 			if (h < 32)
971 				hashes[0] |= (1 << h);
972 			else
973 				hashes[1] |= (1 << (h - 32));
974 		}
975 	}
976 
977 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
978 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
979 
980 	return;
981 }
982 
983 /*
 984  * This routine is recommended by the ThunderLAN manual to ensure that
985  * the internal PHY is powered up correctly. It also recommends a one
986  * second pause at the end to 'wait for the clocks to start' but in my
987  * experience this isn't necessary.
988  */
989 static void tl_hardreset(dev)
990 	device_t		dev;
991 {
992 	struct tl_softc		*sc;
993 	int			i;
994 	u_int16_t		flags;
995 
996 	sc = device_get_softc(dev);
997 
998 	tl_mii_sync(sc);
999 
1000 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1001 
1002 	for (i = 0; i < MII_NPHY; i++)
1003 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
1004 
1005 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1006 	DELAY(50000);
1007 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1008 	tl_mii_sync(sc);
1009 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1010 
1011 	DELAY(50000);
1012 	return;
1013 }
1014 
1015 static void tl_softreset(sc, internal)
1016 	struct tl_softc		*sc;
1017 	int			internal;
1018 {
1019         u_int32_t               cmd, dummy, i;
1020 
1021         /* Assert the adapter reset bit. */
1022 	CMD_SET(sc, TL_CMD_ADRST);
1023 
1024         /* Turn off interrupts */
1025 	CMD_SET(sc, TL_CMD_INTSOFF);
1026 
1027 	/* First, clear the stats registers. */
1028 	for (i = 0; i < 5; i++)
1029 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1030 
1031         /* Clear Areg and Hash registers */
1032 	for (i = 0; i < 8; i++)
1033 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1034 
1035         /*
1036 	 * Set up Netconfig register. Enable one channel and
1037 	 * one fragment mode.
1038 	 */
1039 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1040 	if (internal && !sc->tl_bitrate) {
1041 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1042 	} else {
1043 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1044 	}
1045 
1046 	/* Handle cards with bitrate devices. */
1047 	if (sc->tl_bitrate)
1048 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1049 
1050 	/*
1051 	 * Load adapter irq pacing timer and tx threshold.
1052 	 * We make the transmit threshold 1 initially but we may
1053 	 * change that later.
1054 	 */
1055 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
1056 	cmd |= TL_CMD_NES;
1057 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1058 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1059 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1060 
1061         /* Unreset the MII */
1062 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1063 
1064 	/* Take the adapter out of reset */
1065 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1066 
1067 	/* Wait for things to settle down a little. */
1068 	DELAY(500);
1069 
1070         return;
1071 }
1072 
1073 /*
1074  * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1075  * against our list and return its name if we find a match.
1076  */
1077 static int tl_probe(dev)
1078 	device_t		dev;
1079 {
1080 	struct tl_type		*t;
1081 
1082 	t = tl_devs;
1083 
1084 	while(t->tl_name != NULL) {
1085 		if ((pci_get_vendor(dev) == t->tl_vid) &&
1086 		    (pci_get_device(dev) == t->tl_did)) {
1087 			device_set_desc(dev, t->tl_name);
1088 			return(0);
1089 		}
1090 		t++;
1091 	}
1092 
1093 	return(ENXIO);
1094 }
1095 
1096 static int tl_attach(dev)
1097 	device_t		dev;
1098 {
1099 	int			i;
1100 	u_int16_t		did, vid;
1101 	struct tl_type		*t;
1102 	struct ifnet		*ifp;
1103 	struct tl_softc		*sc;
1104 	int			error = 0, rid;
1105 	uint8_t			eaddr[ETHER_ADDR_LEN];
1106 
1107 	vid = pci_get_vendor(dev);
1108 	did = pci_get_device(dev);
1109 	sc = device_get_softc(dev);
1110 
1111 	t = tl_devs;
1112 	while(t->tl_name != NULL) {
1113 		if (vid == t->tl_vid && did == t->tl_did)
1114 			break;
1115 		t++;
1116 	}
1117 
1118 	KKASSERT(t->tl_name != NULL);
1119 
1120 	pci_enable_busmaster(dev);
1121 
1122 #ifdef TL_USEIOSPACE
1123 	rid = TL_PCI_LOIO;
1124 	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1125 		RF_ACTIVE);
1126 
1127 	/*
1128 	 * Some cards have the I/O and memory mapped address registers
1129 	 * reversed. Try both combinations before giving up.
1130 	 */
1131 	if (sc->tl_res == NULL) {
1132 		rid = TL_PCI_LOMEM;
1133 		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1134 		    RF_ACTIVE);
1135 	}
1136 #else
1137 	rid = TL_PCI_LOMEM;
1138 	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1139 	    RF_ACTIVE);
1140 	if (sc->tl_res == NULL) {
1141 		rid = TL_PCI_LOIO;
1142 		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1143 		    RF_ACTIVE);
1144 	}
1145 #endif
1146 
1147 	if (sc->tl_res == NULL) {
1148 		device_printf(dev, "couldn't map ports/memory\n");
1149 		error = ENXIO;
1150 		return(error);
1151 	}
1152 
1153 	sc->tl_btag = rman_get_bustag(sc->tl_res);
1154 	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);
1155 
1156 #ifdef notdef
1157 	/*
1158 	 * The ThunderLAN manual suggests jacking the PCI latency
1159 	 * timer all the way up to its maximum value. I'm not sure
1160 	 * if this is really necessary, but what the manual wants,
1161 	 * the manual gets.
1162 	 */
1163 	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1164 	command |= 0x0000FF00;
1165 	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1166 #endif
1167 
1168 	/* Allocate interrupt */
1169 	rid = 0;
1170 	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1171 	    RF_SHAREABLE | RF_ACTIVE);
1172 
1173 	if (sc->tl_irq == NULL) {
1174 		device_printf(dev, "couldn't map interrupt\n");
1175 		error = ENXIO;
1176 		goto fail;
1177 	}
1178 
1179 	/*
1180 	 * Now allocate memory for the TX and RX lists.
1181 	 */
1182 	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1183 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
1184 
1185 	if (sc->tl_ldata == NULL) {
1186 		device_printf(dev, "no memory for list buffers!\n");
1187 		error = ENXIO;
1188 		goto fail;
1189 	}
1190 
1191 	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1192 
1193 	sc->tl_dinfo = t;
1194 	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
1195 		sc->tl_eeaddr = TL_EEPROM_EADDR;
1196 	if (t->tl_vid == OLICOM_VENDORID)
1197 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1198 
1199 	/* Reset the adapter. */
1200 	tl_softreset(sc, 1);
1201 	tl_hardreset(dev);
1202 	tl_softreset(sc, 1);
1203 
1204 	ifp = &sc->arpcom.ac_if;
1205 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1206 
1207 	/*
1208 	 * Get station address from the EEPROM.
1209 	 */
1210 	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1211 		device_printf(dev, "failed to read station address\n");
1212 		error = ENXIO;
1213 		goto fail;
1214 	}
1215 
1216         /*
1217          * XXX Olicom, in its desire to be different from the
1218          * rest of the world, has done strange things with the
1219          * encoding of the station address in the EEPROM. First
1220          * of all, they store the address at offset 0xF8 rather
1221          * than at 0x83 like the ThunderLAN manual suggests.
1222          * Second, they store the address in three 16-bit words in
1223          * network byte order, as opposed to storing it sequentially
1224          * like all the other ThunderLAN cards. In order to get
1225          * the station address in a form that matches what the Olicom
1226          * diagnostic utility specifies, we have to byte-swap each
1227          * word. To make things even more confusing, neither 00:00:28
 1228          * nor 00:00:24 appears in the IEEE OUI database.
1229          */
1230         if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
1231                 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1232                         u_int16_t               *p;
1233                         p = (u_int16_t *)&eaddr[i];
1234                         *p = ntohs(*p);
1235                 }
1236         }
1237 
1238 	ifp->if_softc = sc;
1239 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1240 	ifp->if_ioctl = tl_ioctl;
1241 	ifp->if_start = tl_start;
1242 	ifp->if_watchdog = tl_watchdog;
1243 	ifp->if_init = tl_init;
1244 	ifp->if_mtu = ETHERMTU;
1245 	ifq_set_maxlen(&ifp->if_snd, TL_TX_LIST_CNT - 1);
1246 	ifq_set_ready(&ifp->if_snd);
1247 	callout_init(&sc->tl_stat_timer);
1248 
1249 	/* Reset the adapter again. */
1250 	tl_softreset(sc, 1);
1251 	tl_hardreset(dev);
1252 	tl_softreset(sc, 1);
1253 
1254 	/*
1255 	 * Do MII setup. If no PHYs are found, then this is a
1256 	 * bitrate ThunderLAN chip that only supports 10baseT
1257 	 * and AUI/BNC.
1258 	 */
1259 	if (mii_phy_probe(dev, &sc->tl_miibus,
1260 	    tl_ifmedia_upd, tl_ifmedia_sts)) {
1261 		struct ifmedia		*ifm;
1262 		sc->tl_bitrate = 1;
1263 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1264 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1265 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1266 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1267 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1268 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1269 		/* Reset again, this time setting bitrate mode. */
1270 		tl_softreset(sc, 1);
1271 		ifm = &sc->ifmedia;
1272 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1273 		tl_ifmedia_upd(ifp);
1274 	}
1275 
1276 	/*
1277 	 * Call MI attach routine.
1278 	 */
1279 	ether_ifattach(ifp, eaddr, NULL);
1280 
1281 	error = bus_setup_intr(dev, sc->tl_irq, INTR_NETSAFE,
1282 			       tl_intr, sc, &sc->tl_intrhand,
1283 			       ifp->if_serializer);
1284 
1285 	if (error) {
1286 		ether_ifdetach(ifp);
1287 		device_printf(dev, "couldn't set up irq\n");
1288 		goto fail;
1289 	}
1290 
1291 	return(0);
1292 
1293 fail:
1294 	tl_detach(dev);
1295 	return(error);
1296 }
1297 
1298 static int tl_detach(dev)
1299 	device_t		dev;
1300 {
1301 	struct tl_softc *sc = device_get_softc(dev);
1302 	struct ifnet *ifp = &sc->arpcom.ac_if;
1303 
1304 	lwkt_serialize_enter(ifp->if_serializer);
1305 
1306 	if (device_is_attached(dev)) {
1307 		tl_stop(sc);
1308 		ether_ifdetach(ifp);
1309 	}
1310 
1311 	if (sc->tl_miibus)
1312 		device_delete_child(dev, sc->tl_miibus);
1313 	bus_generic_detach(dev);
1314 
1315 	if (sc->tl_ldata)
1316 		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1317 	if (sc->tl_bitrate)
1318 		ifmedia_removeall(&sc->ifmedia);
1319 	if (sc->tl_intrhand)
1320 		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1321 	if (sc->tl_irq)
1322 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1323 	if (sc->tl_res)
1324 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1325 
1326 	lwkt_serialize_exit(ifp->if_serializer);
1327 	return(0);
1328 }
1329 
1330 /*
1331  * Initialize the transmit lists.
1332  */
1333 static int tl_list_tx_init(sc)
1334 	struct tl_softc		*sc;
1335 {
1336 	struct tl_chain_data	*cd;
1337 	struct tl_list_data	*ld;
1338 	int			i;
1339 
1340 	cd = &sc->tl_cdata;
1341 	ld = sc->tl_ldata;
1342 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1343 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1344 		if (i == (TL_TX_LIST_CNT - 1))
1345 			cd->tl_tx_chain[i].tl_next = NULL;
1346 		else
1347 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1348 	}
1349 
1350 	cd->tl_tx_free = &cd->tl_tx_chain[0];
1351 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1352 	sc->tl_txeoc = 1;
1353 
1354 	return(0);
1355 }
1356 
1357 /*
1358  * Initialize the RX lists and allocate mbufs for them.
1359  */
1360 static int tl_list_rx_init(sc)
1361 	struct tl_softc		*sc;
1362 {
1363 	struct tl_chain_data	*cd;
1364 	struct tl_list_data	*ld;
1365 	int			i;
1366 
1367 	cd = &sc->tl_cdata;
1368 	ld = sc->tl_ldata;
1369 
1370 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1371 		cd->tl_rx_chain[i].tl_ptr =
1372 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1373 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1374 			return(ENOBUFS);
1375 		if (i == (TL_RX_LIST_CNT - 1)) {
1376 			cd->tl_rx_chain[i].tl_next = NULL;
1377 			ld->tl_rx_list[i].tlist_fptr = 0;
1378 		} else {
1379 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1380 			ld->tl_rx_list[i].tlist_fptr =
1381 					vtophys(&ld->tl_rx_list[i + 1]);
1382 		}
1383 	}
1384 
1385 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1386 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1387 
1388 	return(0);
1389 }
1390 
1391 static int tl_newbuf(sc, c)
1392 	struct tl_softc		*sc;
1393 	struct tl_chain_onefrag	*c;
1394 {
1395 	struct mbuf *m_new;
1396 
1397 	m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1398 	if (m_new == NULL)
1399 		return (ENOBUFS);
1400 
1401 	c->tl_mbuf = m_new;
1402 	c->tl_next = NULL;
1403 	c->tl_ptr->tlist_frsize = MCLBYTES;
1404 	c->tl_ptr->tlist_fptr = 0;
1405 	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1406 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1407 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1408 
1409 	return(0);
1410 }
1411 /*
1412  * Interrupt handler for RX 'end of frame' condition (EOF). This
1413  * tells us that a full ethernet frame has been captured and we need
1414  * to handle it.
1415  *
1416  * Reception is done using 'lists' which consist of a header and a
1417  * series of 10 data count/data address pairs that point to buffers.
1418  * Initially you're supposed to create a list, populate it with pointers
1419  * to buffers, then load the physical address of the list into the
1420  * ch_parm register. The adapter is then supposed to DMA the received
1421  * frame into the buffers for you.
1422  *
1423  * To make things as fast as possible, we have the chip DMA directly
1424  * into mbufs. This saves us from having to do a buffer copy: we can
1425  * just hand the mbufs directly to ether_input(). Once the frame has
1426  * been sent on its way, the 'list' structure is assigned a new buffer
 1427  * and moved to the end of the RX chain. As long as we stay ahead of
1428  * the chip, it will always think it has an endless receive channel.
1429  *
1430  * If we happen to fall behind and the chip manages to fill up all of
1431  * the buffers, it will generate an end of channel interrupt and wait
1432  * for us to empty the chain and restart the receiver.
1433  */
1434 static int tl_intvec_rxeof(xsc, type)
1435 	void			*xsc;
1436 	u_int32_t		type;
1437 {
1438 	struct tl_softc		*sc;
1439 	int			r = 0, total_len = 0;
1440 	struct ether_header	*eh;
1441 	struct mbuf		*m;
1442 	struct ifnet		*ifp;
1443 	struct tl_chain_onefrag	*cur_rx;
1444 
1445 	sc = xsc;
1446 	ifp = &sc->arpcom.ac_if;
1447 
1448 	while(sc->tl_cdata.tl_rx_head != NULL) {
1449 		cur_rx = sc->tl_cdata.tl_rx_head;
1450 		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1451 			break;
1452 		r++;
1453 		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1454 		m = cur_rx->tl_mbuf;
1455 		total_len = cur_rx->tl_ptr->tlist_frsize;
1456 
1457 		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1458 			ifp->if_ierrors++;
1459 			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1460 			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1461 			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1462 			continue;
1463 		}
1464 
1465 		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1466 						vtophys(cur_rx->tl_ptr);
1467 		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1468 		sc->tl_cdata.tl_rx_tail = cur_rx;
1469 
1470 		eh = mtod(m, struct ether_header *);
1471 		m->m_pkthdr.rcvif = ifp;
1472 
1473 		/*
1474 		 * Note: when the ThunderLAN chip is in 'capture all
1475 		 * frames' mode, it will receive its own transmissions.
 1476 		 * We don't need to process our own transmissions,
1477 		 * so we drop them here and continue.
1478 		 */
1479 		/*if (ifp->if_flags & IFF_PROMISC && */
1480 		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
1481 		 					ETHER_ADDR_LEN)) {
1482 				m_freem(m);
1483 				continue;
1484 		}
1485 
1486 		ifp->if_input(ifp, m);
1487 	}
1488 
1489 	return(r);
1490 }
1491 
1492 /*
1493  * The RX-EOC condition hits when the ch_parm address hasn't been
1494  * initialized or the adapter reached a list with a forward pointer
1495  * of 0 (which indicates the end of the chain). In our case, this means
1496  * the card has hit the end of the receive buffer chain and we need to
1497  * empty out the buffers and shift the pointer back to the beginning again.
1498  */
1499 static int tl_intvec_rxeoc(xsc, type)
1500 	void			*xsc;
1501 	u_int32_t		type;
1502 {
1503 	struct tl_softc		*sc;
1504 	int			r;
1505 	struct tl_chain_data	*cd;
1506 
1507 
1508 	sc = xsc;
1509 	cd = &sc->tl_cdata;
1510 
1511 	/* Flush out the receive queue and ack RXEOF interrupts. */
1512 	r = tl_intvec_rxeof(xsc, type);
1513 	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1514 	r = 1;
1515 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1516 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1517 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1518 	r |= (TL_CMD_GO|TL_CMD_RT);
1519 	return(r);
1520 }
1521 
1522 static int tl_intvec_txeof(xsc, type)
1523 	void			*xsc;
1524 	u_int32_t		type;
1525 {
1526 	struct tl_softc		*sc;
1527 	int			r = 0;
1528 	struct tl_chain		*cur_tx;
1529 
1530 	sc = xsc;
1531 
1532 	/*
1533 	 * Go through our tx list and free mbufs for those
1534 	 * frames that have been sent.
1535 	 */
1536 	while (sc->tl_cdata.tl_tx_head != NULL) {
1537 		cur_tx = sc->tl_cdata.tl_tx_head;
1538 		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1539 			break;
1540 		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1541 
1542 		r++;
1543 		m_freem(cur_tx->tl_mbuf);
1544 		cur_tx->tl_mbuf = NULL;
1545 
1546 		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1547 		sc->tl_cdata.tl_tx_free = cur_tx;
1548 		if (!cur_tx->tl_ptr->tlist_fptr)
1549 			break;
1550 	}
1551 
1552 	return(r);
1553 }
1554 
1555 /*
1556  * The transmit end of channel interrupt. The adapter triggers this
1557  * interrupt to tell us it hit the end of the current transmit list.
1558  *
1559  * A note about this: it's possible for a condition to arise where
1560  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1561  * You have to avoid this since the chip expects things to go in a
1562  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1563  * When the TXEOF handler is called, it will free all of the transmitted
1564  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1565  * interrupt should be received and acknowledged before any more frames
 1566  * are queued for transmission. If tl_start() is called after TXEOF
1567  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1568  * it could attempt to issue a transmit command prematurely.
1569  *
1570  * To guard against this, tl_start() will only issue transmit commands
1571  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1572  * can set this flag once tl_start() has cleared it.
1573  */
1574 static int tl_intvec_txeoc(xsc, type)
1575 	void			*xsc;
1576 	u_int32_t		type;
1577 {
1578 	struct tl_softc		*sc;
1579 	struct ifnet		*ifp;
1580 	u_int32_t		cmd;
1581 
1582 	sc = xsc;
1583 	ifp = &sc->arpcom.ac_if;
1584 
1585 	/* Clear the timeout timer. */
1586 	ifp->if_timer = 0;
1587 
1588 	if (sc->tl_cdata.tl_tx_head == NULL) {
1589 		ifp->if_flags &= ~IFF_OACTIVE;
1590 		sc->tl_cdata.tl_tx_tail = NULL;
1591 		sc->tl_txeoc = 1;
1592 	} else {
1593 		sc->tl_txeoc = 0;
1594 		/* First we have to ack the EOC interrupt. */
1595 		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1596 		/* Then load the address of the next TX list. */
1597 		CSR_WRITE_4(sc, TL_CH_PARM,
1598 		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1599 		/* Restart TX channel. */
1600 		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1601 		cmd &= ~TL_CMD_RT;
1602 		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1603 		CMD_PUT(sc, cmd);
1604 		return(0);
1605 	}
1606 
1607 	return(1);
1608 }
1609 
1610 static int tl_intvec_adchk(xsc, type)
1611 	void			*xsc;
1612 	u_int32_t		type;
1613 {
1614 	struct tl_softc		*sc;
1615 
1616 	sc = xsc;
1617 
1618 	if (type) {
1619 		if_printf(&sc->arpcom.ac_if, "adapter check: %x\n",
1620 			  (unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1621 	}
1622 
1623 	tl_softreset(sc, 1);
1624 	tl_stop(sc);
1625 	tl_init(sc);
1626 	CMD_SET(sc, TL_CMD_INTSON);
1627 
1628 	return(0);
1629 }
1630 
1631 static int tl_intvec_netsts(xsc, type)
1632 	void			*xsc;
1633 	u_int32_t		type;
1634 {
1635 	struct tl_softc		*sc;
1636 	u_int16_t		netsts;
1637 
1638 	sc = xsc;
1639 
1640 	netsts = tl_dio_read16(sc, TL_NETSTS);
1641 	tl_dio_write16(sc, TL_NETSTS, netsts);
1642 
1643 	if_printf(&sc->arpcom.ac_if, "network status: %x\n", netsts);
1644 
1645 	return(1);
1646 }
1647 
1648 static void tl_intr(xsc)
1649 	void			*xsc;
1650 {
1651 	struct tl_softc		*sc;
1652 	struct ifnet		*ifp;
1653 	int			r = 0;
1654 	u_int32_t		type = 0;
1655 	u_int16_t		ints = 0;
1656 	u_int8_t		ivec = 0;
1657 
1658 	sc = xsc;
1659 
1660 	/* Disable interrupts */
1661 	ints = CSR_READ_2(sc, TL_HOST_INT);
1662 	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1663 	type = (ints << 16) & 0xFFFF0000;
1664 	ivec = (ints & TL_VEC_MASK) >> 5;
1665 	ints = (ints & TL_INT_MASK) >> 2;
1666 
1667 	ifp = &sc->arpcom.ac_if;
1668 
1669 	switch(ints) {
1670 	case (TL_INTR_INVALID):
1671 #ifdef DIAGNOSTIC
1672 		if_printf(ifp, "got an invalid interrupt!\n");
1673 #endif
1674 		/* Re-enable interrupts but don't ack this one. */
1675 		CMD_PUT(sc, type);
1676 		r = 0;
1677 		break;
1678 	case (TL_INTR_TXEOF):
1679 		r = tl_intvec_txeof((void *)sc, type);
1680 		break;
1681 	case (TL_INTR_TXEOC):
1682 		r = tl_intvec_txeoc((void *)sc, type);
1683 		break;
1684 	case (TL_INTR_STATOFLOW):
1685 		tl_stats_update_serialized(sc);
1686 		r = 1;
1687 		break;
1688 	case (TL_INTR_RXEOF):
1689 		r = tl_intvec_rxeof((void *)sc, type);
1690 		break;
1691 	case (TL_INTR_DUMMY):
1692 		if_printf(ifp, "got a dummy interrupt\n");
1693 		r = 1;
1694 		break;
1695 	case (TL_INTR_ADCHK):
1696 		if (ivec)
1697 			r = tl_intvec_adchk((void *)sc, type);
1698 		else
1699 			r = tl_intvec_netsts((void *)sc, type);
1700 		break;
1701 	case (TL_INTR_RXEOC):
1702 		r = tl_intvec_rxeoc((void *)sc, type);
1703 		break;
1704 	default:
1705 		if_printf(ifp, "bogus interrupt type\n");
1706 		break;
1707 	}
1708 
1709 	/* Re-enable interrupts */
1710 	if (r) {
1711 		CMD_PUT(sc, TL_CMD_ACK | r | type);
1712 	}
1713 
1714 	if (!ifq_is_empty(&ifp->if_snd))
1715 		tl_start(ifp);
1716 
1717 	return;
1718 }
1719 
1720 static
1721 void
1722 tl_stats_update(void *xsc)
1723 {
1724 	struct tl_softc *sc = xsc;
1725 	struct ifnet *ifp = &sc->arpcom.ac_if;
1726 
1727 	lwkt_serialize_enter(ifp->if_serializer);
1728 	tl_stats_update_serialized(xsc);
1729 	lwkt_serialize_exit(ifp->if_serializer);
1730 }
1731 
1732 static
1733 void
1734 tl_stats_update_serialized(void *xsc)
1735 {
1736 	struct tl_softc		*sc;
1737 	struct ifnet		*ifp;
1738 	struct tl_stats		tl_stats;
1739 	struct mii_data		*mii;
1740 	u_int32_t		*p;
1741 
1742 	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1743 
1744 	sc = xsc;
1745 	ifp = &sc->arpcom.ac_if;
1746 
1747 	p = (u_int32_t *)&tl_stats;
1748 
1749 	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1750 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1751 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1752 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1753 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1754 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1755 
1756 	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1757 	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1758 				tl_stats.tl_tx_multi_collision;
1759 	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1760 	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1761 			    tl_rx_overrun(tl_stats);
1762 	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1763 
1764 	if (tl_tx_underrun(tl_stats)) {
1765 		u_int8_t		tx_thresh;
1766 		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1767 		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1768 			tx_thresh >>= 4;
1769 			tx_thresh++;
1770 			if_printf(ifp, "tx underrun -- increasing "
1771 				  "tx threshold to %d bytes\n",
1772 				  (64 * (tx_thresh * 4)));
1773 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1774 			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1775 		}
1776 	}
1777 
1778 	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);
1779 
1780 	if (!sc->tl_bitrate) {
1781 		mii = device_get_softc(sc->tl_miibus);
1782 		mii_tick(mii);
1783 	}
1784 }
1785 
1786 /*
1787  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1788  * pointers to the fragment pointers.
1789  */
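/*
 * Each transmit list holds up to TL_MAXFRAGS fragment descriptors; every
 * fragment takes a physical address (hence the vtophys() calls) and a
 * byte count, and the last fragment in the list is tagged with
 * TL_LAST_FRAG.
 */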
1790 static int tl_encap(sc, c, m_head)
1791 	struct tl_softc		*sc;
1792 	struct tl_chain		*c;
1793 	struct mbuf		*m_head;
1794 {
1795 	int			frag = 0;
1796 	struct tl_frag		*f = NULL;
1797 	int			total_len;
1798 	struct mbuf		*m;
1799 
1800 	/*
1801 	 * Start packing the mbufs in this chain into
1802 	 * the fragment pointers. Stop when we run out
1803 	 * of fragments or hit the end of the mbuf chain.
1804 	 */
1805 	m = m_head;
1806 	total_len = 0;
1807 
1808 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1809 		if (m->m_len != 0) {
1810 			if (frag == TL_MAXFRAGS)
1811 				break;
1812 			total_len += m->m_len;
1813 			c->tl_ptr->tl_frag[frag].tlist_dadr =
1814 				vtophys(mtod(m, vm_offset_t));
1815 			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1816 			frag++;
1817 		}
1818 	}
1819 
1820 	/*
1821 	 * Handle special cases.
1822 	 * Special case #1: we used up all 10 fragments, but
1823 	 * we have more mbufs left in the chain. Copy the
1824 	 * data into an mbuf cluster. Note that we don't
1825 	 * bother clearing the values in the other fragment
1826 	 * pointers/counters; it wouldn't gain us anything,
1827 	 * and would waste cycles.
1828 	 */
1829 	if (m != NULL) {
1830 		struct mbuf *m_new;
1831 
1832 		m_new = m_getl(m_head->m_pkthdr.len, MB_DONTWAIT, MT_DATA,
1833 			       M_PKTHDR, NULL);
1834 		if (m_new == NULL) {
1835 			if_printf(&sc->arpcom.ac_if, "no memory for tx list\n");
1836 			return (1);
1837 		}
1838 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1839 					mtod(m_new, caddr_t));
1840 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1841 		m_freem(m_head);
1842 		m_head = m_new;
1843 		f = &c->tl_ptr->tl_frag[0];
1844 		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1845 		f->tlist_dcnt = total_len = m_new->m_len;
1846 		frag = 1;
1847 	}
1848 
1849 	/*
1850 	 * Special case #2: the frame is smaller than the minimum
1851 	 * frame size. We have to pad it to make the chip happy.
1852 	 */
1853 	if (total_len < TL_MIN_FRAMELEN) {
1854 		if (frag == TL_MAXFRAGS) {
1855 			if_printf(&sc->arpcom.ac_if, "all frags filled but "
1856 				  "frame still too small!\n");
1857 		}
1858 		f = &c->tl_ptr->tl_frag[frag];
1859 		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1860 		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1861 		total_len += f->tlist_dcnt;
1862 		frag++;
1863 	}
1864 
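	/*
	 * Finish off the list: flag the final fragment, record the total
	 * frame size, mark the list ready for the chip and clear the
	 * forward pointer (tl_start() fills it in when chaining lists
	 * together).
	 */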
1865 	c->tl_mbuf = m_head;
1866 	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1867 	c->tl_ptr->tlist_frsize = total_len;
1868 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1869 	c->tl_ptr->tlist_fptr = 0;
1870 
1871 	return(0);
1872 }
1873 
1874 /*
1875  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1876  * to the mbuf data regions directly in the transmit lists. We also save a
1877  * copy of the pointers since the transmit list fragment pointers are
1878  * physical addresses.
1879  */
1880 static void tl_start(ifp)
1881 	struct ifnet		*ifp;
1882 {
1883 	struct tl_softc		*sc;
1884 	struct mbuf		*m_head = NULL;
1885 	u_int32_t		cmd;
1886 	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1887 
1888 	sc = ifp->if_softc;
1889 
1890 	/*
1891 	 * Check for an available queue slot. If there are none,
1892 	 * punt.
1893 	 */
1894 	if (sc->tl_cdata.tl_tx_free == NULL) {
1895 		ifp->if_flags |= IFF_OACTIVE;
1896 		return;
1897 	}
1898 
1899 	start_tx = sc->tl_cdata.tl_tx_free;
1900 
1901 	while(sc->tl_cdata.tl_tx_free != NULL) {
1902 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1903 		if (m_head == NULL)
1904 			break;
1905 
1906 		/* Pick a chain member off the free list. */
1907 		cur_tx = sc->tl_cdata.tl_tx_free;
1908 		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1909 
1910 		cur_tx->tl_next = NULL;
1911 
1912 		/* Pack the data into the list. */
1913 		tl_encap(sc, cur_tx, m_head);
1914 
1915 		/* Chain it together */
1916 		if (prev != NULL) {
1917 			prev->tl_next = cur_tx;
1918 			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1919 		}
1920 		prev = cur_tx;
1921 
1922 		BPF_MTAP(ifp, cur_tx->tl_mbuf);
1923 	}
1924 
1925 	/*
1926 	 * If there are no packets queued, bail.
1927 	 */
1928 	if (cur_tx == NULL)
1929 		return;
1930 
1931 	/*
1932 	 * That's all we can stands, we can't stands no more.
1933 	 * If there are no other transfers pending, then issue the
1934 	 * TX GO command to the adapter to start things moving.
1935 	 * Otherwise, just leave the data in the queue and let
1936 	 * the EOF/EOC interrupt handler send.
1937 	 */
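	/*
	 * sc->tl_txeoc evidently tracks whether the TX channel has gone
	 * idle; only when it is set do we load TL_CH_PARM with the
	 * physical address of the first list and issue GO with the
	 * receive bit (TL_CMD_RT) cleared so the command applies to the
	 * transmit channel.
	 */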
1938 	if (sc->tl_cdata.tl_tx_head == NULL) {
1939 		sc->tl_cdata.tl_tx_head = start_tx;
1940 		sc->tl_cdata.tl_tx_tail = cur_tx;
1941 
1942 		if (sc->tl_txeoc) {
1943 			sc->tl_txeoc = 0;
1944 			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
1945 			cmd = CSR_READ_4(sc, TL_HOSTCMD);
1946 			cmd &= ~TL_CMD_RT;
1947 			cmd |= TL_CMD_GO|TL_CMD_INTSON;
1948 			CMD_PUT(sc, cmd);
1949 		}
1950 	} else {
1951 		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
1952 		sc->tl_cdata.tl_tx_tail = cur_tx;
1953 	}
1954 
1955 	/*
1956 	 * Set a timeout in case the chip goes out to lunch.
1957 	 */
1958 	ifp->if_timer = 5;
1959 
1960 	return;
1961 }
1962 
1963 static void tl_init(xsc)
1964 	void			*xsc;
1965 {
1966 	struct tl_softc		*sc = xsc;
1967 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1968 	struct mii_data		*mii;
1969 
1970 	/*
1971 	 * Cancel pending I/O.
1972 	 */
1973 	tl_stop(sc);
1974 
1975 	/* Initialize TX FIFO threshold */
1976 	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1977 	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
1978 
1979 	/* Set PCI burst size */
1980 	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
1981 
1982 	/*
1983 	 * Set 'capture all frames' bit for promiscuous mode.
1984 	 */
1985 	if (ifp->if_flags & IFF_PROMISC)
1986 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
1987 	else
1988 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
1989 
1990 	/*
1991 	 * Set capture broadcast bit to capture broadcast frames.
1992 	 */
1993 	if (ifp->if_flags & IFF_BROADCAST)
1994 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
1995 	else
1996 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
1997 
1998 	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
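	/* Program the maximum receive frame size: one mbuf cluster. */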
1999 
2000 	/* Init our MAC address */
2001 	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
2002 
2003 	/* Init multicast filter, if needed. */
2004 	tl_setmulti(sc);
2005 
2006 	/* Init circular RX list. */
2007 	if (tl_list_rx_init(sc) == ENOBUFS) {
2008 		if_printf(ifp, "initialization failed: no "
2009 			  "memory for rx buffers\n");
2010 		tl_stop(sc);
2011 		return;
2012 	}
2013 
2014 	/* Init TX pointers. */
2015 	tl_list_tx_init(sc);
2016 
2017 	/* Enable PCI interrupts. */
2018 	CMD_SET(sc, TL_CMD_INTSON);
2019 
2020 	/* Load the address of the rx list */
2021 	CMD_SET(sc, TL_CMD_RT);
2022 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2023 
2024 	if (!sc->tl_bitrate) {
2025 		if (sc->tl_miibus != NULL) {
2026 			mii = device_get_softc(sc->tl_miibus);
2027 			mii_mediachg(mii);
2028 		}
2029 	}
2030 
2031 	/* Send the RX go command */
2032 	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2033 
2034 	ifp->if_flags |= IFF_RUNNING;
2035 	ifp->if_flags &= ~IFF_OACTIVE;
2036 
2037 	/* Start the stats update counter */
2038 	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);
2039 }
2040 
2041 /*
2042  * Set media options.
2043  */
2044 static int tl_ifmedia_upd(ifp)
2045 	struct ifnet		*ifp;
2046 {
2047 	struct tl_softc		*sc;
2048 	struct mii_data		*mii = NULL;
2049 
2050 	sc = ifp->if_softc;
2051 
2052 	if (sc->tl_bitrate)
2053 		tl_setmode(sc, sc->ifmedia.ifm_media);
2054 	else {
2055 		mii = device_get_softc(sc->tl_miibus);
2056 		mii_mediachg(mii);
2057 	}
2058 
2059 	return(0);
2060 }
2061 
2062 /*
2063  * Report current media status.
2064  */
2065 static void tl_ifmedia_sts(ifp, ifmr)
2066 	struct ifnet		*ifp;
2067 	struct ifmediareq	*ifmr;
2068 {
2069 	struct tl_softc		*sc;
2070 	struct mii_data		*mii;
2071 
2072 	sc = ifp->if_softc;
2073 
2074 	ifmr->ifm_active = IFM_ETHER;
2075 
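	/*
	 * With no MII PHY attached (bitrate mode), the current media is
	 * inferred from the MTXD bits latched in TL_ACOMMIT; judging from
	 * the decoding below, MTXD1 selects AUI versus 10baseT and MTXD3
	 * selects half versus full duplex.
	 */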
2076 	if (sc->tl_bitrate) {
2077 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2078 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2079 		else
2080 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2081 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2082 			ifmr->ifm_active |= IFM_HDX;
2083 		else
2084 			ifmr->ifm_active |= IFM_FDX;
2085 		return;
2086 	} else {
2087 		mii = device_get_softc(sc->tl_miibus);
2088 		mii_pollstat(mii);
2089 		ifmr->ifm_active = mii->mii_media_active;
2090 		ifmr->ifm_status = mii->mii_media_status;
2091 	}
2092 
2093 	return;
2094 }
2095 
2096 static int tl_ioctl(ifp, command, data, cr)
2097 	struct ifnet		*ifp;
2098 	u_long			command;
2099 	caddr_t			data;
2100 	struct ucred		*cr;
2101 {
2102 	struct tl_softc		*sc = ifp->if_softc;
2103 	struct ifreq		*ifr = (struct ifreq *) data;
2104 	int			error = 0;
2105 
2106 	switch(command) {
2107 	case SIOCSIFFLAGS:
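		/*
		 * If the interface is already running and only the
		 * promiscuous flag changed, just toggle the
		 * capture-all-frames bit and reload the multicast filter
		 * instead of doing a full reinit.
		 */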
2108 		if (ifp->if_flags & IFF_UP) {
2109 			if (ifp->if_flags & IFF_RUNNING &&
2110 			    ifp->if_flags & IFF_PROMISC &&
2111 			    !(sc->tl_if_flags & IFF_PROMISC)) {
2112 				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2113 				tl_setmulti(sc);
2114 			} else if (ifp->if_flags & IFF_RUNNING &&
2115 			    !(ifp->if_flags & IFF_PROMISC) &&
2116 			    sc->tl_if_flags & IFF_PROMISC) {
2117 				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2118 				tl_setmulti(sc);
2119 			} else
2120 				tl_init(sc);
2121 		} else {
2122 			if (ifp->if_flags & IFF_RUNNING) {
2123 				tl_stop(sc);
2124 			}
2125 		}
2126 		sc->tl_if_flags = ifp->if_flags;
2127 		error = 0;
2128 		break;
2129 	case SIOCADDMULTI:
2130 	case SIOCDELMULTI:
2131 		tl_setmulti(sc);
2132 		error = 0;
2133 		break;
2134 	case SIOCSIFMEDIA:
2135 	case SIOCGIFMEDIA:
2136 		if (sc->tl_bitrate)
2137 			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2138 		else {
2139 			struct mii_data		*mii;
2140 			mii = device_get_softc(sc->tl_miibus);
2141 			error = ifmedia_ioctl(ifp, ifr,
2142 			    &mii->mii_media, command);
2143 		}
2144 		break;
2145 	default:
2146 		error = ether_ioctl(ifp, command, data);
2147 		break;
2148 	}
2149 	return(error);
2150 }
2151 
2152 static void tl_watchdog(ifp)
2153 	struct ifnet		*ifp;
2154 {
2155 	struct tl_softc		*sc;
2156 
2157 	sc = ifp->if_softc;
2158 
2159 	if_printf(ifp, "device timeout\n");
2160 
2161 	ifp->if_oerrors++;
2162 
2163 	tl_softreset(sc, 1);
2164 	tl_init(sc);
2165 
2166 	return;
2167 }
2168 
2169 /*
2170  * Stop the adapter and free any mbufs allocated to the
2171  * RX and TX lists.
2172  */
2173 static void tl_stop(sc)
2174 	struct tl_softc		*sc;
2175 {
2176 	int		i;
2177 	struct ifnet		*ifp;
2178 
2179 	ifp = &sc->arpcom.ac_if;
2180 
2181 	/* Stop the stats updater. */
2182 	callout_stop(&sc->tl_stat_timer);
2183 
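	/*
	 * The STOP command acts on one DMA channel at a time: TL_CMD_RT
	 * appears to select the receive channel, so it is cleared before
	 * stopping the transmitter below and set before stopping the
	 * receiver.
	 */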
2184 	/* Stop the transmitter */
2185 	CMD_CLR(sc, TL_CMD_RT);
2186 	CMD_SET(sc, TL_CMD_STOP);
2187 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2188 
2189 	/* Stop the receiver */
2190 	CMD_SET(sc, TL_CMD_RT);
2191 	CMD_SET(sc, TL_CMD_STOP);
2192 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2193 
2194 	/*
2195 	 * Disable host interrupts.
2196 	 */
2197 	CMD_SET(sc, TL_CMD_INTSOFF);
2198 
2199 	/*
2200 	 * Clear list pointer.
2201 	 */
2202 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2203 
2204 	/*
2205 	 * Free the RX lists.
2206 	 */
2207 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2208 		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2209 			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2210 			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2211 		}
2212 	}
2213 	bzero((char *)&sc->tl_ldata->tl_rx_list,
2214 		sizeof(sc->tl_ldata->tl_rx_list));
2215 
2216 	/*
2217 	 * Free the TX list buffers.
2218 	 */
2219 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2220 		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2221 			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2222 			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2223 		}
2224 	}
2225 	bzero((char *)&sc->tl_ldata->tl_tx_list,
2226 		sizeof(sc->tl_ldata->tl_tx_list));
2227 
2228 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2229 
2230 	return;
2231 }
2232 
2233 /*
2234  * Stop all chip I/O so that the kernel's probe routines don't
2235  * get confused by errant DMAs when rebooting.
2236  */
2237 static void tl_shutdown(dev)
2238 	device_t		dev;
2239 {
2240 	struct tl_softc		*sc;
2241 
2242 	sc = device_get_softc(dev);
2243 
2244 	tl_stop(sc);
2245 
2246 	return;
2247 }
2248