xref: /openbsd-src/sys/dev/pci/if_tl.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_tl.c,v 1.59 2014/01/31 08:58:32 brad Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $
35  */
36 
37 /*
38  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
39  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
40  * the National Semiconductor DP83840A physical interface and the
41  * Microchip Technology 24Cxx series serial EEPROM.
42  *
43  * Written using the following four documents:
44  *
45  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
46  * National Semiconductor DP83840A data sheet (www.national.com)
47  * Microchip Technology 24C02C data sheet (www.microchip.com)
48  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
49  *
50  * Written by Bill Paul <wpaul@ctr.columbia.edu>
51  * Electrical Engineering Department
52  * Columbia University, New York City
53  */
54 
55 /*
56  * Some notes about the ThunderLAN:
57  *
58  * The ThunderLAN controller is a single chip containing PCI controller
59  * logic, approximately 3K of on-board SRAM, a LAN controller, and media
60  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
61  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
62  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
63  * to act as a complete ethernet interface.
64  *
65  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
66  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
67  * in full or half duplex. Some of the Compaq Deskpro machines use a
68  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
69  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
70  * concert with the ThunderLAN's internal PHY to provide full 10/100
71  * support. This is cheaper than using a standalone external PHY for both
72  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
73  * A serial EEPROM is also attached to the ThunderLAN chip to provide
74  * power-up default register settings and for storing the adapter's
75  * station address. Although not supported by this driver, the ThunderLAN
76  * chip can also be connected to token ring PHYs.
77  *
78  * The ThunderLAN has a set of registers which can be used to issue
79  * commands, acknowledge interrupts, and to manipulate other internal
80  * registers on its DIO bus. The primary registers can be accessed
81  * using either programmed I/O (inb/outb) or via PCI memory mapping,
82  * depending on how the card is configured during the PCI probing
83  * phase. It is even possible to have both PIO and memory mapped
84  * access turned on at the same time.
85  *
86  * Frame reception and transmission with the ThunderLAN chip is done
87  * using frame 'lists.' A list structure looks more or less like this:
88  *
89  * struct tl_frag {
90  *	u_int32_t		fragment_address;
91  *	u_int32_t		fragment_size;
92  * };
93  * struct tl_list {
94  *	u_int32_t		forward_pointer;
95  *	u_int16_t		cstat;
96  *	u_int16_t		frame_size;
97  *	struct tl_frag		fragments[10];
98  * };
99  *
100  * The forward pointer in the list header can be either a 0 or the address
101  * of another list, which allows several lists to be linked together. Each
102  * list contains up to 10 fragment descriptors. This means the chip allows
103  * ethernet frames to be broken up into up to 10 chunks for transfer to
104  * and from the SRAM. Note that the forward pointer and fragment buffer
105  * addresses are physical memory addresses, not virtual. Note also that
106  * a single ethernet frame can not span lists: if the host wants to
107  * transmit a frame and the frame data is split up over more than 10
 108  * buffers, the frame has to be collapsed before it can be transmitted.
109  *
110  * To receive frames, the driver sets up a number of lists and populates
111  * the fragment descriptors, then it sends an RX GO command to the chip.
112  * When a frame is received, the chip will DMA it into the memory regions
113  * specified by the fragment descriptors and then trigger an RX 'end of
114  * frame interrupt' when done. The driver may choose to use only one
 115  * fragment per list; this may result in slightly less efficient use
116  * of memory in exchange for improving performance.
117  *
118  * To transmit frames, the driver again sets up lists and fragment
119  * descriptors, only this time the buffers contain frame data that
120  * is to be DMA'ed into the chip instead of out of it. Once the chip
121  * has transferred the data into its on-board SRAM, it will trigger a
122  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
123  * interrupt when it reaches the end of the list.
124  */
125 
126 /*
127  * Some notes about this driver:
128  *
129  * The ThunderLAN chip provides a couple of different ways to organize
130  * reception, transmission and interrupt handling. The simplest approach
131  * is to use one list each for transmission and reception. In this mode,
132  * the ThunderLAN will generate two interrupts for every received frame
133  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
134  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
135  * performance to have to handle so many interrupts.
136  *
137  * Initially I wanted to create a circular list of receive buffers so
138  * that the ThunderLAN chip would think there was an infinitely long
139  * receive channel and never deliver an RXEOC interrupt. However this
140  * doesn't work correctly under heavy load: while the manual says the
141  * chip will trigger an RXEOF interrupt each time a frame is copied into
142  * memory, you can't count on the chip waiting around for you to acknowledge
143  * the interrupt before it starts trying to DMA the next frame. The result
144  * is that the chip might traverse the entire circular list and then wrap
145  * around before you have a chance to do anything about it. Consequently,
146  * the receive list is terminated (with a 0 in the forward pointer in the
147  * last element). Each time an RXEOF interrupt arrives, the used list
148  * is shifted to the end of the list. This gives the appearance of an
149  * infinitely large RX chain so long as the driver doesn't fall behind
150  * the chip and allow all of the lists to be filled up.
151  *
152  * If all the lists are filled, the adapter will deliver an RX 'end of
153  * channel' interrupt when it hits the 0 forward pointer at the end of
154  * the chain. The RXEOC handler then cleans out the RX chain and resets
155  * the list head pointer in the ch_parm register and restarts the receiver.
156  *
157  * For frame transmission, it is possible to program the ThunderLAN's
158  * transmit interrupt threshold so that the chip can acknowledge multiple
159  * lists with only a single TX EOF interrupt. This allows the driver to
160  * queue several frames in one shot, and only have to handle a total
161  * two interrupts (one TX EOF and one TX EOC) no matter how many frames
162  * are transmitted. Frame transmission is done directly out of the
163  * mbufs passed to the tl_start() routine via the interface send queue.
164  * The driver simply sets up the fragment descriptors in the transmit
165  * lists to point to the mbuf data regions and sends a TX GO command.
166  *
167  * Note that since the RX and TX lists themselves are always used
 168  * only by the driver, they are malloc()ed once at driver initialization
169  * time and never free()ed.
170  *
171  * Also, in order to remain as platform independent as possible, this
172  * driver uses memory mapped register access to manipulate the card
173  * as opposed to programmed I/O. This avoids the use of the inb/outb
174  * (and related) instructions which are specific to the i386 platform.
175  *
176  * Using these techniques, this driver achieves very high performance
177  * by minimizing the amount of interrupts generated during large
178  * transfers and by completely avoiding buffer copies. Frame transfer
179  * to and from the ThunderLAN chip is performed entirely by the chip
180  * itself thereby reducing the load on the host CPU.
181  */
182 
183 #include "bpfilter.h"
184 
185 #include <sys/param.h>
186 #include <sys/systm.h>
187 #include <sys/sockio.h>
188 #include <sys/mbuf.h>
189 #include <sys/malloc.h>
190 #include <sys/kernel.h>
191 #include <sys/socket.h>
192 #include <sys/device.h>
193 #include <sys/timeout.h>
194 
195 #include <net/if.h>
196 
197 #ifdef INET
198 #include <netinet/in.h>
199 #include <netinet/in_systm.h>
200 #include <netinet/ip.h>
201 #include <netinet/if_ether.h>
202 #endif
203 
204 #include <net/if_dl.h>
205 #include <net/if_media.h>
206 
207 #if NBPFILTER > 0
208 #include <net/bpf.h>
209 #endif
210 
211 #include <uvm/uvm_extern.h>              /* for vtophys */
212 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
213 
214 #include <dev/mii/mii.h>
215 #include <dev/mii/miivar.h>
216 
217 #include <dev/pci/pcireg.h>
218 #include <dev/pci/pcivar.h>
219 #include <dev/pci/pcidevs.h>
220 
221 /*
222  * Default to using PIO register access mode to pacify certain
223  * laptop docking stations with built-in ThunderLAN chips that
224  * don't seem to handle memory mapped mode properly.
225  */
226 #define TL_USEIOSPACE
227 
228 #include <dev/pci/if_tlreg.h>
229 #include <dev/mii/tlphyvar.h>
230 
231 const struct tl_products tl_prods[] = {
232 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T },
233 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 },
234 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2 },
235 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
236 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
237 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
238 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 },
239 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 },
240 	{ PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 },
241 	{ 0, 0, 0 }
242 };
243 
244 int tl_probe(struct device *, void *, void *);
245 void tl_attach(struct device *, struct device *, void *);
246 void tl_wait_up(void *);
247 int tl_intvec_rxeoc(void *, u_int32_t);
248 int tl_intvec_txeoc(void *, u_int32_t);
249 int tl_intvec_txeof(void *, u_int32_t);
250 int tl_intvec_rxeof(void *, u_int32_t);
251 int tl_intvec_adchk(void *, u_int32_t);
252 int tl_intvec_netsts(void *, u_int32_t);
253 
254 int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
255 void tl_stats_update(void *);
256 int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
257 
258 int tl_intr(void *);
259 void tl_start(struct ifnet *);
260 int tl_ioctl(struct ifnet *, u_long, caddr_t);
261 void tl_init(void *);
262 void tl_stop(struct tl_softc *);
263 void tl_watchdog(struct ifnet *);
264 int tl_ifmedia_upd(struct ifnet *);
265 void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
266 
267 u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
268 u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
269 int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
270 
271 void tl_mii_sync(struct tl_softc *);
272 void tl_mii_send(struct tl_softc *, u_int32_t, int);
273 int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
274 int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
275 int tl_miibus_readreg(struct device *, int, int);
276 void tl_miibus_writereg(struct device *, int, int, int);
277 void tl_miibus_statchg(struct device *);
278 
279 void tl_setmode(struct tl_softc *, int);
280 int tl_calchash(caddr_t);
281 void tl_iff(struct tl_softc *);
282 void tl_setfilt(struct tl_softc *, caddr_t, int);
283 void tl_softreset(struct tl_softc *, int);
284 void tl_hardreset(struct device *);
285 int tl_list_rx_init(struct tl_softc *);
286 int tl_list_tx_init(struct tl_softc *);
287 
288 u_int8_t tl_dio_read8(struct tl_softc *, int);
289 u_int16_t tl_dio_read16(struct tl_softc *, int);
290 u_int32_t tl_dio_read32(struct tl_softc *, int);
291 void tl_dio_write8(struct tl_softc *, int, int);
292 void tl_dio_write16(struct tl_softc *, int, int);
293 void tl_dio_write32(struct tl_softc *, int, int);
294 void tl_dio_setbit(struct tl_softc *, int, int);
295 void tl_dio_clrbit(struct tl_softc *, int, int);
296 void tl_dio_setbit16(struct tl_softc *, int, int);
297 void tl_dio_clrbit16(struct tl_softc *, int, int);
298 
299 u_int8_t
300 tl_dio_read8(struct tl_softc *sc, int reg)
301 {
302 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
303 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
304 }
305 
306 u_int16_t
307 tl_dio_read16(struct tl_softc *sc, int reg)
308 {
309 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
310 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
311 }
312 
313 u_int32_t
314 tl_dio_read32(struct tl_softc *sc, int reg)
315 {
316 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
317 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
318 }
319 
320 void
321 tl_dio_write8(struct tl_softc *sc, int reg, int val)
322 {
323 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
324 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
325 }
326 
327 void
328 tl_dio_write16(struct tl_softc *sc, int reg, int val)
329 {
330 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
331 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
332 }
333 
334 void
335 tl_dio_write32(struct tl_softc *sc, int reg, int val)
336 {
337 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
338 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
339 }
340 
341 void
342 tl_dio_setbit(struct tl_softc *sc, int reg, int bit)
343 {
344 	u_int8_t			f;
345 
346 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
347 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
348 	f |= bit;
349 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
350 }
351 
352 void
353 tl_dio_clrbit(struct tl_softc *sc, int reg, int bit)
354 {
355 	u_int8_t			f;
356 
357 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
358 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
359 	f &= ~bit;
360 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
361 }
362 
363 void
364 tl_dio_setbit16(struct tl_softc *sc, int reg, int bit)
365 {
366 	u_int16_t			f;
367 
368 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
369 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
370 	f |= bit;
371 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
372 }
373 
374 void
375 tl_dio_clrbit16(struct tl_softc *sc, int reg, int bit)
376 {
377 	u_int16_t			f;
378 
379 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
380 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
381 	f &= ~bit;
382 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
383 }
384 
385 /*
386  * Send an instruction or address to the EEPROM, check for ACK.
387  */
388 u_int8_t
389 tl_eeprom_putbyte(struct tl_softc *sc, int byte)
390 {
391 	int			i, ack = 0;
392 
393 	/*
394 	 * Make sure we're in TX mode.
395 	 */
396 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
397 
398 	/*
399 	 * Feed in each bit and strobe the clock.
400 	 */
401 	for (i = 0x80; i; i >>= 1) {
402 		if (byte & i)
403 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
404 		else
405 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
406 		DELAY(1);
407 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
408 		DELAY(1);
409 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
410 	}
411 
412 	/*
413 	 * Turn off TX mode.
414 	 */
415 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
416 
417 	/*
418 	 * Check for ack.
419 	 */
420 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
421 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
422 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
423 
424 	return(ack);
425 }
426 
427 /*
428  * Read a byte of data stored in the EEPROM at address 'addr.'
429  */
430 u_int8_t
431 tl_eeprom_getbyte(struct tl_softc *sc, int addr, u_int8_t *dest)
432 {
433 	int			i;
434 	u_int8_t		byte = 0;
435 
436 	tl_dio_write8(sc, TL_NETSIO, 0);
437 
438 	EEPROM_START;
439 
440 	/*
441 	 * Send write control code to EEPROM.
442 	 */
443 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
444 		printf("%s: failed to send write command, status: %x\n",
445 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
446 		return(1);
447 	}
448 
449 	/*
450 	 * Send address of byte we want to read.
451 	 */
452 	if (tl_eeprom_putbyte(sc, addr)) {
453 		printf("%s: failed to send address, status: %x\n",
454 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
455 		return(1);
456 	}
457 
458 	EEPROM_STOP;
459 	EEPROM_START;
460 	/*
461 	 * Send read control code to EEPROM.
462 	 */
463 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
464 		printf("%s: failed to send write command, status: %x\n",
465 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
466 		return(1);
467 	}
468 
469 	/*
470 	 * Start reading bits from EEPROM.
471 	 */
472 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
473 	for (i = 0x80; i; i >>= 1) {
474 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
475 		DELAY(1);
476 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
477 			byte |= i;
478 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
479 		DELAY(1);
480 	}
481 
482 	EEPROM_STOP;
483 
484 	/*
485 	 * No ACK generated for read, so just return byte.
486 	 */
487 
488 	*dest = byte;
489 
490 	return(0);
491 }
492 
493 /*
494  * Read a sequence of bytes from the EEPROM.
495  */
496 int
497 tl_read_eeprom(struct tl_softc *sc, caddr_t dest, int off, int cnt)
498 {
499 	int			err = 0, i;
500 	u_int8_t		byte = 0;
501 
502 	for (i = 0; i < cnt; i++) {
503 		err = tl_eeprom_getbyte(sc, off + i, &byte);
504 		if (err)
505 			break;
506 		*(dest + i) = byte;
507 	}
508 
509 	return(err ? 1 : 0);
510 }
511 
512 void
513 tl_mii_sync(struct tl_softc *sc)
514 {
515 	int			i;
516 
517 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
518 
519 	for (i = 0; i < 32; i++) {
520 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
521 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
522 	}
523 }
524 
525 void
526 tl_mii_send(struct tl_softc *sc, u_int32_t bits, int cnt)
527 {
528 	int			i;
529 
530 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
531 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
532 		if (bits & i)
533 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
534 		else
535 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
536 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
537 	}
538 }
539 
540 int
541 tl_mii_readreg(struct tl_softc *sc, struct tl_mii_frame *frame)
542 {
543 	int			i, ack, s;
544 	int			minten = 0;
545 
546 	s = splnet();
547 
548 	tl_mii_sync(sc);
549 
550 	/*
551 	 * Set up frame for RX.
552 	 */
553 	frame->mii_stdelim = TL_MII_STARTDELIM;
554 	frame->mii_opcode = TL_MII_READOP;
555 	frame->mii_turnaround = 0;
556 	frame->mii_data = 0;
557 
558 	/*
559 	 * Turn off MII interrupt by forcing MINTEN low.
560 	 */
561 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
562 	if (minten)
563 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
564 
565 	/*
566  	 * Turn on data xmit.
567 	 */
568 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
569 
570 	/*
571 	 * Send command/address info.
572 	 */
573 	tl_mii_send(sc, frame->mii_stdelim, 2);
574 	tl_mii_send(sc, frame->mii_opcode, 2);
575 	tl_mii_send(sc, frame->mii_phyaddr, 5);
576 	tl_mii_send(sc, frame->mii_regaddr, 5);
577 
578 	/*
579 	 * Turn off xmit.
580 	 */
581 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
582 
583 	/* Idle bit */
584 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
585 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
586 
587 	/* Check for ack */
588 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
589 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
590 
591 	/* Complete the cycle */
592 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
593 
594 	/*
595 	 * Now try reading data bits. If the ack failed, we still
596 	 * need to clock through 16 cycles to keep the PHYs in sync.
597 	 */
598 	if (ack) {
599 		for(i = 0; i < 16; i++) {
600 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
601 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
602 		}
603 		goto fail;
604 	}
605 
606 	for (i = 0x8000; i; i >>= 1) {
607 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
608 		if (!ack) {
609 			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
610 				frame->mii_data |= i;
611 		}
612 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
613 	}
614 
615 fail:
616 
617 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
618 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
619 
620 	/* Reenable interrupts */
621 	if (minten)
622 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
623 
624 	splx(s);
625 
626 	if (ack)
627 		return(1);
628 	return(0);
629 }
630 
631 int
632 tl_mii_writereg(struct tl_softc *sc, struct tl_mii_frame *frame)
633 {
634 	int			s;
635 	int			minten;
636 
637 	tl_mii_sync(sc);
638 
639 	s = splnet();
640 	/*
641 	 * Set up frame for TX.
642 	 */
643 
644 	frame->mii_stdelim = TL_MII_STARTDELIM;
645 	frame->mii_opcode = TL_MII_WRITEOP;
646 	frame->mii_turnaround = TL_MII_TURNAROUND;
647 
648 	/*
649 	 * Turn off MII interrupt by forcing MINTEN low.
650 	 */
651 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
652 	if (minten)
653 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
654 
655 	/*
656  	 * Turn on data output.
657 	 */
658 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
659 
660 	tl_mii_send(sc, frame->mii_stdelim, 2);
661 	tl_mii_send(sc, frame->mii_opcode, 2);
662 	tl_mii_send(sc, frame->mii_phyaddr, 5);
663 	tl_mii_send(sc, frame->mii_regaddr, 5);
664 	tl_mii_send(sc, frame->mii_turnaround, 2);
665 	tl_mii_send(sc, frame->mii_data, 16);
666 
667 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
668 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
669 
670 	/*
671 	 * Turn off xmit.
672 	 */
673 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
674 
675 	/* Reenable interrupts */
676 	if (minten)
677 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
678 
679 	splx(s);
680 
681 	return(0);
682 }
683 
684 int
685 tl_miibus_readreg(struct device *dev, int phy, int reg)
686 {
687 	struct tl_softc *sc = (struct tl_softc *)dev;
688 	struct tl_mii_frame	frame;
689 
690 	bzero(&frame, sizeof(frame));
691 
692 	frame.mii_phyaddr = phy;
693 	frame.mii_regaddr = reg;
694 	tl_mii_readreg(sc, &frame);
695 
696 	return(frame.mii_data);
697 }
698 
699 void
700 tl_miibus_writereg(struct device *dev, int phy, int reg, int data)
701 {
702 	struct tl_softc *sc = (struct tl_softc *)dev;
703 	struct tl_mii_frame	frame;
704 
705 	bzero(&frame, sizeof(frame));
706 
707 	frame.mii_phyaddr = phy;
708 	frame.mii_regaddr = reg;
709 	frame.mii_data = data;
710 
711 	tl_mii_writereg(sc, &frame);
712 }
713 
714 void
715 tl_miibus_statchg(struct device *dev)
716 {
717 	struct tl_softc *sc = (struct tl_softc *)dev;
718 
719 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
720 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
721 	else
722 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
723 }
724 
725 /*
726  * Set modes for bitrate devices.
727  */
728 void
729 tl_setmode(struct tl_softc *sc, int media)
730 {
731 	if (IFM_SUBTYPE(media) == IFM_10_5)
732 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
733 	if (IFM_SUBTYPE(media) == IFM_10_T) {
734 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
735 		if ((media & IFM_GMASK) == IFM_FDX) {
736 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
737 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
738 		} else {
739 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
740 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
741 		}
742 	}
743 }
744 
745 /*
746  * Calculate the hash of a MAC address for programming the multicast hash
747  * table.  This hash is simply the address split into 6-bit chunks
748  * XOR'd, e.g.
749  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
750  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
751  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
752  * the folded 24-bit value is split into 6-bit portions and XOR'd.
753  */
754 int
755 tl_calchash(caddr_t addr)
756 {
757 	int			t;
758 
759 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
760 		(addr[2] ^ addr[5]);
761 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
762 }
763 
764 /*
765  * The ThunderLAN has a perfect MAC address filter in addition to
766  * the multicast hash filter. The perfect filter can be programmed
767  * with up to four MAC addresses. The first one is always used to
768  * hold the station address, which leaves us free to use the other
769  * three for multicast addresses.
770  */
771 void
772 tl_setfilt(struct tl_softc *sc, caddr_t addr, int slot)
773 {
774 	int			i;
775 	u_int16_t		regaddr;
776 
777 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
778 
779 	for (i = 0; i < ETHER_ADDR_LEN; i++)
780 		tl_dio_write8(sc, regaddr + i, *(addr + i));
781 }
782 
783 /*
784  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
785  * linked list. This is fine, except addresses are added from the head
786  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
787  * group to always be in the perfect filter, but as more groups are added,
788  * the 224.0.0.1 entry (which is always added first) gets pushed down
789  * the list and ends up at the tail. So after 3 or 4 multicast groups
790  * are added, the all-hosts entry gets pushed out of the perfect filter
791  * and into the hash table.
792  *
793  * Because the multicast list is a doubly-linked list as opposed to a
794  * circular queue, we don't have the ability to just grab the tail of
795  * the list and traverse it backwards. Instead, we have to traverse
796  * the list once to find the tail, then traverse it again backwards to
797  * update the multicast filter.
798  */
799 void
800 tl_iff(struct tl_softc *sc)
801 {
802 	struct ifnet		*ifp = &sc->arpcom.ac_if;
803 	struct arpcom		*ac = &sc->arpcom;
804 	struct ether_multistep step;
805 	struct ether_multi *enm;
806 	u_int32_t		hashes[2];
807 	int			h = 0;
808 
809 	tl_dio_clrbit(sc, TL_NETCMD, (TL_CMD_CAF | TL_CMD_NOBRX));
810 	bzero(hashes, sizeof(hashes));
811 	ifp->if_flags &= ~IFF_ALLMULTI;
812 
813 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
814 		ifp->if_flags |= IFF_ALLMULTI;
815 		if (ifp->if_flags & IFF_PROMISC)
816 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
817 		else
818 			hashes[0] = hashes[1] = 0xffffffff;
819 	} else {
820 		ETHER_FIRST_MULTI(step, ac, enm);
821 		while (enm != NULL) {
822 			h = tl_calchash(enm->enm_addrlo);
823 
824 			if (h < 32)
825 				hashes[0] |= (1 << h);
826 			else
827 				hashes[1] |= (1 << (h - 32));
828 
829 			ETHER_NEXT_MULTI(step, enm);
830 		}
831 	}
832 
833 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
834 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
835 }
836 
837 /*
 838  * This routine is recommended by the ThunderLAN manual to ensure that
839  * the internal PHY is powered up correctly. It also recommends a one
840  * second pause at the end to 'wait for the clocks to start' but in my
841  * experience this isn't necessary.
842  */
843 void
844 tl_hardreset(struct device *dev)
845 {
846 	struct tl_softc		*sc = (struct tl_softc *)dev;
847 	int			i;
848 	u_int16_t		flags;
849 
850 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
851 
852 	for (i =0 ; i < MII_NPHY; i++)
853 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
854 
855 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
856 	tl_mii_sync(sc);
857 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
858 
859 	DELAY(5000);
860 }
861 
862 void
863 tl_softreset(struct tl_softc *sc, int internal)
864 {
865         u_int32_t               cmd, dummy, i;
866 
867         /* Assert the adapter reset bit. */
868 	CMD_SET(sc, TL_CMD_ADRST);
869         /* Turn off interrupts */
870 	CMD_SET(sc, TL_CMD_INTSOFF);
871 
872 	/* First, clear the stats registers. */
873 	for (i = 0; i < 5; i++)
874 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
875 
876         /* Clear Areg and Hash registers */
877 	for (i = 0; i < 8; i++)
878 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
879 
880         /*
881 	 * Set up Netconfig register. Enable one channel and
882 	 * one fragment mode.
883 	 */
884 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
885 	if (internal && !sc->tl_bitrate) {
886 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
887 	} else {
888 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
889 	}
890 
891 	/* Handle cards with bitrate devices. */
892 	if (sc->tl_bitrate)
893 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
894 
895 	/*
896 	 * Load adapter irq pacing timer and tx threshold.
897 	 * We make the transmit threshold 1 initially but we may
898 	 * change that later.
899 	 */
900 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
901 	cmd |= TL_CMD_NES;
902 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
903 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
904 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
905 
906         /* Unreset the MII */
907 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
908 
909 	/* Take the adapter out of reset */
910 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
911 
912 	/* Wait for things to settle down a little. */
913 	DELAY(500);
914 }
915 
916 /*
917  * Initialize the transmit lists.
918  */
919 int
920 tl_list_tx_init(struct tl_softc *sc)
921 {
922 	struct tl_chain_data	*cd;
923 	struct tl_list_data	*ld;
924 	int			i;
925 
926 	cd = &sc->tl_cdata;
927 	ld = sc->tl_ldata;
928 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
929 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
930 		if (i == (TL_TX_LIST_CNT - 1))
931 			cd->tl_tx_chain[i].tl_next = NULL;
932 		else
933 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
934 	}
935 
936 	cd->tl_tx_free = &cd->tl_tx_chain[0];
937 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
938 	sc->tl_txeoc = 1;
939 
940 	return(0);
941 }
942 
943 /*
944  * Initialize the RX lists and allocate mbufs for them.
945  */
946 int
947 tl_list_rx_init(struct tl_softc *sc)
948 {
949 	struct tl_chain_data	*cd;
950 	struct tl_list_data	*ld;
951 	int			i;
952 
953 	cd = &sc->tl_cdata;
954 	ld = sc->tl_ldata;
955 
956 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
957 		cd->tl_rx_chain[i].tl_ptr =
958 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
959 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
960 			return(ENOBUFS);
961 		if (i == (TL_RX_LIST_CNT - 1)) {
962 			cd->tl_rx_chain[i].tl_next = NULL;
963 			ld->tl_rx_list[i].tlist_fptr = 0;
964 		} else {
965 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
966 			ld->tl_rx_list[i].tlist_fptr =
967 					VTOPHYS(&ld->tl_rx_list[i + 1]);
968 		}
969 	}
970 
971 	cd->tl_rx_head = &cd->tl_rx_chain[0];
972 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
973 
974 	return(0);
975 }
976 
977 int
978 tl_newbuf(struct tl_softc *sc, struct tl_chain_onefrag *c)
979 {
980 	struct mbuf		*m_new = NULL;
981 
982 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
983 	if (m_new == NULL) {
984 		return(ENOBUFS);
985 	}
986 
987 	MCLGET(m_new, M_DONTWAIT);
988 	if (!(m_new->m_flags & M_EXT)) {
989 		m_freem(m_new);
990 		return(ENOBUFS);
991 	}
992 
993 #ifdef __alpha__
994 	m_new->m_data += 2;
995 #endif
996 
997 	c->tl_mbuf = m_new;
998 	c->tl_next = NULL;
999 	c->tl_ptr->tlist_frsize = MCLBYTES;
1000 	c->tl_ptr->tlist_fptr = 0;
1001 	c->tl_ptr->tl_frag.tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
1002 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1003 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1004 
1005 	return(0);
1006 }
1007 /*
1008  * Interrupt handler for RX 'end of frame' condition (EOF). This
1009  * tells us that a full ethernet frame has been captured and we need
1010  * to handle it.
1011  *
1012  * Reception is done using 'lists' which consist of a header and a
1013  * series of 10 data count/data address pairs that point to buffers.
1014  * Initially you're supposed to create a list, populate it with pointers
1015  * to buffers, then load the physical address of the list into the
1016  * ch_parm register. The adapter is then supposed to DMA the received
1017  * frame into the buffers for you.
1018  *
1019  * To make things as fast as possible, we have the chip DMA directly
1020  * into mbufs. This saves us from having to do a buffer copy: we can
1021  * just hand the mbufs directly to ether_input(). Once the frame has
1022  * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long as we stay ahead of
1024  * the chip, it will always think it has an endless receive channel.
1025  *
1026  * If we happen to fall behind and the chip manages to fill up all of
1027  * the buffers, it will generate an end of channel interrupt and wait
1028  * for us to empty the chain and restart the receiver.
1029  */
/*
 * RX end-of-frame handler: walk the software RX chain from the head,
 * hand each completed frame to the network stack, refill the
 * descriptor with a fresh cluster and recycle it onto the tail of
 * the chain. Returns the number of frames processed, which the
 * caller folds into the interrupt ACK.
 */
int
tl_intvec_rxeof(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first descriptor the chip hasn't filled. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * Out of clusters: drop this frame and requeue
			 * the old buffer so the ring stays populated.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Link the refreshed descriptor onto the tail of the chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
						VTOPHYS(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		 					ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
	 	 * Handle BPF listeners. Let the BPF user see the packet, but
	 	 * don't pass it up to the ether_input() layer unless it's
	 	 * a broadcast packet, multicast packet, matches our ethernet
	 	 * address or the interface is in promiscuous mode. If we don't
	 	 * want the packet, just forget it. We leave the mbuf in place
	 	 * since it can be used again later.
	 	 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return(r);
}
1100 
1101 /*
1102  * The RX-EOC condition hits when the ch_parm address hasn't been
1103  * initialized or the adapter reached a list with a forward pointer
1104  * of 0 (which indicates the end of the chain). In our case, this means
1105  * the card has hit the end of the receive buffer chain and we need to
1106  * empty out the buffers and shift the pointer back to the beginning again.
1107  */
int
tl_intvec_rxeoc(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind the software chain to the start of the ring. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	/* Reload the chip's list pointer; GO|RT tells tl_intr to restart RX. */
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
1128 
/*
 * TX end-of-frame handler: reclaim descriptors for frames the chip
 * has finished transmitting, free their mbufs and return the
 * descriptors to the free list. Returns the number of frames
 * reclaimed (used as the interrupt ACK count by tl_intr()).
 */
int
tl_intvec_txeof(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		/* Stop at the first frame the chip hasn't completed. */
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Put the descriptor back on the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this TX burst. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}
1160 
1161 /*
1162  * The transmit end of channel interrupt. The adapter triggers this
1163  * interrupt to tell us it hit the end of the current transmit list.
1164  *
1165  * A note about this: it's possible for a condition to arise where
1166  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1167  * You have to avoid this since the chip expects things to go in a
1168  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1169  * When the TXEOF handler is called, it will free all of the transmitted
1170  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1171  * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
1173  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1174  * it could attempt to issue a transmit command prematurely.
1175  *
1176  * To guard against this, tl_start() will only issue transmit commands
1177  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1178  * can set this flag once tl_start() has cleared it.
1179  */
int
tl_intvec_txeoc(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing left queued: allow tl_start() to issue a new GO. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		/* More frames pending: restart the TX channel ourselves. */
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    VTOPHYS(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		/* Already acked above; tell tl_intr() not to ack again. */
		return(0);
	}

	return(1);
}
1214 
1215 int
1216 tl_intvec_adchk(void *xsc, u_int32_t type)
1217 {
1218 	struct tl_softc		*sc;
1219 
1220 	sc = xsc;
1221 
1222 	if (type)
1223 		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
1224 			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1225 
1226 	tl_softreset(sc, 1);
1227 	tl_stop(sc);
1228 	tl_init(sc);
1229 	CMD_SET(sc, TL_CMD_INTSON);
1230 
1231 	return(0);
1232 }
1233 
1234 int
1235 tl_intvec_netsts(void *xsc, u_int32_t type)
1236 {
1237 	struct tl_softc		*sc;
1238 	u_int16_t		netsts;
1239 
1240 	sc = xsc;
1241 
1242 	netsts = tl_dio_read16(sc, TL_NETSTS);
1243 	tl_dio_write16(sc, TL_NETSTS, netsts);
1244 
1245 	printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);
1246 
1247 	return(1);
1248 }
1249 
/*
 * Main interrupt service routine: decode the interrupt type and
 * vector from the host interrupt register, dispatch to the matching
 * handler, ack the interrupt with the handler's returned count, and
 * kick the transmitter if packets are waiting to be sent.
 */
int
tl_intr(void *xsc)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* Preserve the raw value for the ACK command; split vector/type. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* Shared code: adapter check vs. network status interrupt. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
		break;
	}

	/* Re-enable interrupts; r carries the handler's ack count/flags. */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	/* Try to queue up more packets if any are waiting. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		tl_start(ifp);

	return r;
}
1318 
/*
 * Harvest the chip's read-and-clear statistics counters, fold them
 * into the interface counters, bump the TX threshold after FIFO
 * underruns, and reschedule to run again in one second. Also drives
 * the MII tick for PHY-equipped adapters.
 */
void
tl_stats_update(void *xsc)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	u_int32_t		*p;
	int			s;

	s = splnet();

	bzero(&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/*
	 * With TL_DIO_ADDR_INC set the DIO address auto-increments on
	 * each data access, so five back-to-back reads fetch the whole
	 * statistics block starting at TL_TXGOODFRAMES.
	 */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
				tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
			    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t	tx_thresh;
		/*
		 * The TX FIFO underran: raise the transmit threshold
		 * one step, unless it's already at whole-packet mode.
		 */
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	timeout_add_sec(&sc->tl_stats_tmo, 1);

	if (!sc->tl_bitrate)
		mii_tick(&sc->sc_mii);

	splx(s);
}
1370 
1371 /*
1372  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1373  * pointers to the fragment pointers.
1374  */
/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, 1 if the chain had to be coalesced and no
 * mbuf/cluster could be allocated for the copy. On success the
 * descriptor is marked TL_CSTAT_READY with a zero forward pointer.
 */
int
tl_encap(struct tl_softc *sc, struct tl_chain *c, struct mbuf *m_head)
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Leave the loop with m != NULL if we overflow. */
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				VTOPHYS(mtod(m, vaddr_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Coalesce the whole packet into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		/* Point an extra fragment at the shared zero-padding area. */
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = VTOPHYS(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	/* Finalize: flag the last fragment and arm the descriptor. */
	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}
1456 
1457 /*
1458  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1459  * to the mbuf data regions directly in the transmit lists. We also save a
1460  * copy of the pointers since the transmit list fragment pointers are
1461  * physical addresses.
1462  */
void
tl_start(struct ifnet *ifp)
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	/* Remember where this batch starts for the GO command below. */
	start_tx = sc->tl_cdata.tl_tx_free;

	while(sc->tl_cdata.tl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = VTOPHYS(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/*
		 * Only start the channel when the TXEOC handler has
		 * signalled it's safe (see the comment above
		 * tl_intvec_txeoc()); otherwise the interrupt handler
		 * restarts the channel for us.
		 */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* Transfers in flight: append this batch to the tail. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 10;
}
1551 
/*
 * Bring the interface up: stop any pending I/O, program the FIFO
 * thresholds, burst sizes, station address and RX filter, rebuild the
 * RX/TX descriptor lists, select the media, and start the receiver.
 * The RUNNING flag is set later by tl_wait_up().
 */
void
tl_init(void *xsc)
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
        int			s;

	s = splnet();

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/* Maximum frame the receiver will accept. */
	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Program promiscuous mode and multicast filters. */
	tl_iff(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
			sc->sc_dev.dv_xname);
		tl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(&sc->tl_ldata->tl_rx_list[0]));

	/* Select media via MII or directly for bitrate devices. */
	if (!sc->tl_bitrate)
		mii_mediachg(&sc->sc_mii);
	else
		tl_ifmedia_upd(ifp);

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	splx(s);

	/* Start the stats update counter */
	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
	timeout_add_sec(&sc->tl_stats_tmo, 1);
	/* Mark the interface RUNNING a couple of seconds from now. */
	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
	timeout_add_sec(&sc->tl_wait_tmo, 2);
}
1616 
1617 /*
1618  * Set media options.
1619  */
1620 int
1621 tl_ifmedia_upd(struct ifnet *ifp)
1622 {
1623 	struct tl_softc *sc = ifp->if_softc;
1624 
1625 	if (sc->tl_bitrate)
1626 		tl_setmode(sc, sc->ifmedia.ifm_media);
1627 	else
1628 		mii_mediachg(&sc->sc_mii);
1629 
1630 	return(0);
1631 }
1632 
1633 /*
1634  * Report current media status.
1635  */
1636 void
1637 tl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1638 {
1639 	struct tl_softc		*sc;
1640 	struct mii_data		*mii;
1641 
1642 	sc = ifp->if_softc;
1643 	mii = &sc->sc_mii;
1644 
1645 	ifmr->ifm_active = IFM_ETHER;
1646 	if (sc->tl_bitrate) {
1647 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
1648 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
1649 		else
1650 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1651 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
1652 			ifmr->ifm_active |= IFM_HDX;
1653 		else
1654 			ifmr->ifm_active |= IFM_FDX;
1655 		return;
1656 	} else {
1657 		mii_pollstat(mii);
1658 		ifmr->ifm_active = mii->mii_media_active;
1659 		ifmr->ifm_status = mii->mii_media_status;
1660 	}
1661 }
1662 
1663 int
1664 tl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1665 {
1666 	struct tl_softc		*sc = ifp->if_softc;
1667 	struct ifaddr		*ifa = (struct ifaddr *) data;
1668 	struct ifreq		*ifr = (struct ifreq *) data;
1669 	int			s, error = 0;
1670 
1671 	s = splnet();
1672 
1673 	switch(command) {
1674 	case SIOCSIFADDR:
1675 		ifp->if_flags |= IFF_UP;
1676 		if (!(ifp->if_flags & IFF_RUNNING))
1677 			tl_init(sc);
1678 #ifdef INET
1679 		if (ifa->ifa_addr->sa_family == AF_INET)
1680 			arp_ifinit(&sc->arpcom, ifa);
1681 #endif
1682 		break;
1683 
1684 	case SIOCSIFFLAGS:
1685 		if (ifp->if_flags & IFF_UP) {
1686 			if (ifp->if_flags & IFF_RUNNING)
1687 				error = ENETRESET;
1688 			else
1689 				tl_init(sc);
1690 		} else {
1691 			if (ifp->if_flags & IFF_RUNNING)
1692 				tl_stop(sc);
1693 		}
1694 		break;
1695 
1696 	case SIOCSIFMEDIA:
1697 	case SIOCGIFMEDIA:
1698 		if (sc->tl_bitrate)
1699 			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1700 		else
1701 			error = ifmedia_ioctl(ifp, ifr,
1702 			    &sc->sc_mii.mii_media, command);
1703 		break;
1704 
1705 	default:
1706 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1707 	}
1708 
1709 	if (error == ENETRESET) {
1710 		if (ifp->if_flags & IFF_RUNNING)
1711 			tl_iff(sc);
1712 		error = 0;
1713 	}
1714 
1715 	splx(s);
1716 	return(error);
1717 }
1718 
1719 void
1720 tl_watchdog(struct ifnet *ifp)
1721 {
1722 	struct tl_softc		*sc;
1723 
1724 	sc = ifp->if_softc;
1725 
1726 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1727 
1728 	ifp->if_oerrors++;
1729 
1730 	tl_softreset(sc, 1);
1731 	tl_init(sc);
1732 }
1733 
1734 /*
1735  * Stop the adapter and free any mbufs allocated to the
1736  * RX and TX lists.
1737  */
1738 void
1739 tl_stop(struct tl_softc *sc)
1740 {
1741 	int			i;
1742 	struct ifnet		*ifp;
1743 
1744 	ifp = &sc->arpcom.ac_if;
1745 
1746 	/* Stop the stats updater. */
1747 	timeout_del(&sc->tl_stats_tmo);
1748 	timeout_del(&sc->tl_wait_tmo);
1749 
1750 	/* Stop the transmitter */
1751 	CMD_CLR(sc, TL_CMD_RT);
1752 	CMD_SET(sc, TL_CMD_STOP);
1753 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1754 
1755 	/* Stop the receiver */
1756 	CMD_SET(sc, TL_CMD_RT);
1757 	CMD_SET(sc, TL_CMD_STOP);
1758 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1759 
1760 	/*
1761 	 * Disable host interrupts.
1762 	 */
1763 	CMD_SET(sc, TL_CMD_INTSOFF);
1764 
1765 	/*
1766 	 * Clear list pointer.
1767 	 */
1768 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1769 
1770 	/*
1771 	 * Free the RX lists.
1772 	 */
1773 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1774 		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
1775 			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
1776 			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
1777 		}
1778 	}
1779 	bzero(&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list));
1780 
1781 	/*
1782 	 * Free the TX list buffers.
1783 	 */
1784 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1785 		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
1786 			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
1787 			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
1788 		}
1789 	}
1790 	bzero(&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list));
1791 
1792 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1793 }
1794 
1795 int
1796 tl_probe(struct device *parent, void *match, void *aux)
1797 {
1798 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1799 
1800 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) {
1801 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN)
1802 			return 1;
1803 		return 0;
1804 	}
1805 
1806 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) {
1807 		switch (PCI_PRODUCT(pa->pa_id)) {
1808 		case PCI_PRODUCT_COMPAQ_N100TX:
1809 		case PCI_PRODUCT_COMPAQ_N10T:
1810 		case PCI_PRODUCT_COMPAQ_IntNF3P:
1811 		case PCI_PRODUCT_COMPAQ_DPNet100TX:
1812 		case PCI_PRODUCT_COMPAQ_IntPL100TX:
1813 		case PCI_PRODUCT_COMPAQ_DP4000:
1814 		case PCI_PRODUCT_COMPAQ_N10T2:
1815 		case PCI_PRODUCT_COMPAQ_N10_TX_UTP:
1816 		case PCI_PRODUCT_COMPAQ_NF3P:
1817 		case PCI_PRODUCT_COMPAQ_NF3P_BNC:
1818 			return 1;
1819 		}
1820 		return 0;
1821 	}
1822 
1823 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1824 		switch (PCI_PRODUCT(pa->pa_id)) {
1825 		case PCI_PRODUCT_OLICOM_OC2183:
1826 		case PCI_PRODUCT_OLICOM_OC2325:
1827 		case PCI_PRODUCT_OLICOM_OC2326:
1828 			return 1;
1829 		}
1830 		return 0;
1831 	}
1832 
1833 	return 0;
1834 }
1835 
1836 void
1837 tl_attach(struct device *parent, struct device *self, void *aux)
1838 {
1839 	struct tl_softc *sc = (struct tl_softc *)self;
1840 	struct pci_attach_args *pa = aux;
1841 	pci_chipset_tag_t pc = pa->pa_pc;
1842 	pci_intr_handle_t ih;
1843 	const char *intrstr = NULL;
1844 	struct ifnet *ifp = &sc->arpcom.ac_if;
1845 	bus_size_t iosize;
1846 	u_int32_t command;
1847 	int i, rseg;
1848 	bus_dma_segment_t seg;
1849 	bus_dmamap_t dmamap;
1850 	caddr_t kva;
1851 
1852 	/*
1853 	 * Map control/status registers.
1854 	 */
1855 
1856 #ifdef TL_USEIOSPACE
1857 	if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
1858 	    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
1859 		if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_IO, 0,
1860 		    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
1861 			printf(": can't map i/o space\n");
1862 			return;
1863 		}
1864 	}
1865 #else
1866 	if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
1867 	    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
1868 		if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_MEM, 0,
1869 		    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
1870 			printf(": can't map mem space\n");
1871 			return;
1872 		}
1873 	}
1874 #endif
1875 
1876 	/*
1877 	 * Manual wants the PCI latency timer jacked up to 0xff
1878 	 */
1879 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER);
1880 	command |= 0x0000ff00;
1881 	pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command);
1882 
1883 	/*
1884 	 * Allocate our interrupt.
1885 	 */
1886 	if (pci_intr_map(pa, &ih)) {
1887 		printf(": couldn't map interrupt\n");
1888 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1889 		return;
1890 	}
1891 	intrstr = pci_intr_string(pc, ih);
1892 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc,
1893 	    self->dv_xname);
1894 	if (sc->sc_ih == NULL) {
1895 		printf(": could not establish interrupt");
1896 		if (intrstr != NULL)
1897 			printf(" at %s", intrstr);
1898 		printf("\n");
1899 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1900 		return;
1901 	}
1902 	printf(": %s", intrstr);
1903 
1904 	sc->sc_dmat = pa->pa_dmat;
1905 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data),
1906 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
1907 		printf("%s: can't alloc list\n", sc->sc_dev.dv_xname);
1908 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1909 		return;
1910 	}
1911 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data),
1912 	    &kva, BUS_DMA_NOWAIT)) {
1913 		printf("%s: can't map dma buffers (%zd bytes)\n",
1914 		    sc->sc_dev.dv_xname, sizeof(struct tl_list_data));
1915 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1916 		return;
1917 	}
1918 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1,
1919 	    sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
1920 		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
1921 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
1922 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1923 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1924 		return;
1925 	}
1926 	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva,
1927 	    sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) {
1928 		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
1929 		bus_dmamap_destroy(sc->sc_dmat, dmamap);
1930 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
1931 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1932 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1933 		return;
1934 	}
1935 	sc->tl_ldata = (struct tl_list_data *)kva;
1936 
1937 	for (sc->tl_product = tl_prods; sc->tl_product->tp_vend;
1938 	     sc->tl_product++) {
1939 		if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) &&
1940 		    sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id))
1941 			break;
1942 	}
1943 
1944 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ ||
1945 	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI)
1946 		sc->tl_eeaddr = TL_EEPROM_EADDR;
1947 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM)
1948 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1949 
1950 	/*
1951 	 * Reset adapter.
1952 	 */
1953 	tl_softreset(sc, 1);
1954 	tl_hardreset(self);
1955 	DELAY(1000000);
1956 	tl_softreset(sc, 1);
1957 
1958 	/*
1959 	 * Get station address from the EEPROM.
1960 	 */
1961 	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1962 	    sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1963 		printf("\n%s: failed to read station address\n",
1964 		    sc->sc_dev.dv_xname);
1965 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1966 		return;
1967 	}
1968 
1969 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1970 		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1971 			u_int16_t *p;
1972 
1973 			p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
1974 			*p = ntohs(*p);
1975 		}
1976 	}
1977 
1978 	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
1979 
1980 	ifp = &sc->arpcom.ac_if;
1981 	ifp->if_softc = sc;
1982 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1983 	ifp->if_ioctl = tl_ioctl;
1984 	ifp->if_start = tl_start;
1985 	ifp->if_watchdog = tl_watchdog;
1986 	IFQ_SET_MAXLEN(&ifp->if_snd, TL_TX_LIST_CNT - 1);
1987 	IFQ_SET_READY(&ifp->if_snd);
1988 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1989 
1990 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1991 
1992 	/*
1993 	 * Reset adapter (again).
1994 	 */
1995 	tl_softreset(sc, 1);
1996 	tl_hardreset(self);
1997 	DELAY(1000000);
1998 	tl_softreset(sc, 1);
1999 
2000 	/*
2001 	 * Do MII setup. If no PHYs are found, then this is a
2002 	 * bitrate ThunderLAN chip that only supports 10baseT
2003 	 * and AUI/BNC.
2004 	 */
2005 	sc->sc_mii.mii_ifp = ifp;
2006 	sc->sc_mii.mii_readreg = tl_miibus_readreg;
2007 	sc->sc_mii.mii_writereg = tl_miibus_writereg;
2008 	sc->sc_mii.mii_statchg = tl_miibus_statchg;
2009 	ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2010 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
2011 	    0);
2012 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2013 		struct ifmedia *ifm;
2014 		sc->tl_bitrate = 1;
2015 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2016 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2017 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2018 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2019 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
2020 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
2021 		/* Reset again, this time setting bitrate mode. */
2022 		tl_softreset(sc, 1);
2023 		ifm = &sc->ifmedia;
2024 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
2025 		tl_ifmedia_upd(ifp);
2026 	} else
2027 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2028 
2029 	/*
2030 	 * Attach us everywhere.
2031 	 */
2032 	if_attach(ifp);
2033 	ether_ifattach(ifp);
2034 }
2035 
2036 void
2037 tl_wait_up(void *xsc)
2038 {
2039 	struct tl_softc *sc = xsc;
2040 	struct ifnet *ifp = &sc->arpcom.ac_if;
2041 
2042 	ifp->if_flags |= IFF_RUNNING;
2043 	ifp->if_flags &= ~IFF_OACTIVE;
2044 }
2045 
/* Autoconf glue: softc size plus the match/attach entry points. */
struct cfattach tl_ca = {
	sizeof(struct tl_softc), tl_probe, tl_attach
};
2049 
/* Driver definition: "tl" devices are network interfaces. */
struct cfdriver tl_cd = {
	NULL, "tl", DV_IFNET
};
2053