1 /*	$OpenBSD: if_tl.c,v 1.30 2003/06/30 02:52:51 avsm Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $
35  */
36 
37 /*
38  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
39  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
40  * the National Semiconductor DP83840A physical interface and the
41  * Microchip Technology 24Cxx series serial EEPROM.
42  *
43  * Written using the following four documents:
44  *
45  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
46  * National Semiconductor DP83840A data sheet (www.national.com)
47  * Microchip Technology 24C02C data sheet (www.microchip.com)
48  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
49  *
50  * Written by Bill Paul <wpaul@ctr.columbia.edu>
51  * Electrical Engineering Department
52  * Columbia University, New York City
53  */
54 
55 /*
56  * Some notes about the ThunderLAN:
57  *
58  * The ThunderLAN controller is a single chip containing PCI controller
59  * logic, approximately 3K of on-board SRAM, a LAN controller, and media
60  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
61  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
62  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
63  * to act as a complete ethernet interface.
64  *
65  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
66  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
67  * in full or half duplex. Some of the Compaq Deskpro machines use a
68  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
69  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
70  * concert with the ThunderLAN's internal PHY to provide full 10/100
71  * support. This is cheaper than using a standalone external PHY for both
72  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
73  * A serial EEPROM is also attached to the ThunderLAN chip to provide
74  * power-up default register settings and for storing the adapter's
75  * station address. Although not supported by this driver, the ThunderLAN
76  * chip can also be connected to token ring PHYs.
77  *
78  * The ThunderLAN has a set of registers which can be used to issue
79  * commands, acknowledge interrupts, and to manipulate other internal
80  * registers on its DIO bus. The primary registers can be accessed
81  * using either programmed I/O (inb/outb) or via PCI memory mapping,
82  * depending on how the card is configured during the PCI probing
83  * phase. It is even possible to have both PIO and memory mapped
84  * access turned on at the same time.
85  *
86  * Frame reception and transmission with the ThunderLAN chip is done
87  * using frame 'lists.' A list structure looks more or less like this:
88  *
89  * struct tl_frag {
90  *	u_int32_t		fragment_address;
91  *	u_int32_t		fragment_size;
92  * };
93  * struct tl_list {
94  *	u_int32_t		forward_pointer;
95  *	u_int16_t		cstat;
96  *	u_int16_t		frame_size;
97  *	struct tl_frag		fragments[10];
98  * };
99  *
100  * The forward pointer in the list header can be either a 0 or the address
101  * of another list, which allows several lists to be linked together. Each
102  * list contains up to 10 fragment descriptors. This means the chip allows
103  * ethernet frames to be broken up into up to 10 chunks for transfer to
104  * and from the SRAM. Note that the forward pointer and fragment buffer
105  * addresses are physical memory addresses, not virtual. Note also that
106  * a single ethernet frame can not span lists: if the host wants to
107  * transmit a frame and the frame data is split up over more than 10
108  * buffers, the frame has to be collapsed before it can be transmitted.
109  *
110  * To receive frames, the driver sets up a number of lists and populates
111  * the fragment descriptors, then it sends an RX GO command to the chip.
112  * When a frame is received, the chip will DMA it into the memory regions
113  * specified by the fragment descriptors and then trigger an RX 'end of
114  * frame interrupt' when done. The driver may choose to use only one
115  * fragment per list; this may result in slightly less efficient use
116  * of memory in exchange for improved performance.
117  *
118  * To transmit frames, the driver again sets up lists and fragment
119  * descriptors, only this time the buffers contain frame data that
120  * is to be DMA'ed into the chip instead of out of it. Once the chip
121  * has transferred the data into its on-board SRAM, it will trigger a
122  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
123  * interrupt when it reaches the end of the list.
124  */
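/*
 * To make the list mechanics concrete, here is a minimal sketch (not
 * driver code; it reuses the field names of the pseudo-structures
 * above rather than the driver's real tl_list/tl_frag declarations)
 * of two receive lists chained together. The chip must be handed
 * physical addresses, hence the vtophys() calls:
 *
 *	struct tl_list list[2];
 *
 *	list[0].forward_pointer = vtophys(&list[1]);
 *	list[0].fragments[0].fragment_address = vtophys(rx_buf0);
 *	list[0].fragments[0].fragment_size = MCLBYTES;
 *
 *	list[1].forward_pointer = 0;	(a 0 terminates the chain)
 *	list[1].fragments[0].fragment_address = vtophys(rx_buf1);
 *	list[1].fragments[0].fragment_size = MCLBYTES;
 *
 * Here rx_buf0 and rx_buf1 stand in for receive buffers (mbuf clusters
 * in this driver).
 */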
125 
126 /*
127  * Some notes about this driver:
128  *
129  * The ThunderLAN chip provides a couple of different ways to organize
130  * reception, transmission and interrupt handling. The simplest approach
131  * is to use one list each for transmission and reception. In this mode,
132  * the ThunderLAN will generate two interrupts for every received frame
133  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
134  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
135  * performance to have to handle so many interrupts.
136  *
137  * Initially I wanted to create a circular list of receive buffers so
138  * that the ThunderLAN chip would think there was an infinitely long
139  * receive channel and never deliver an RXEOC interrupt. However this
140  * doesn't work correctly under heavy load: while the manual says the
141  * chip will trigger an RXEOF interrupt each time a frame is copied into
142  * memory, you can't count on the chip waiting around for you to acknowledge
143  * the interrupt before it starts trying to DMA the next frame. The result
144  * is that the chip might traverse the entire circular list and then wrap
145  * around before you have a chance to do anything about it. Consequently,
146  * the receive list is terminated (with a 0 in the forward pointer in the
147  * last element). Each time an RXEOF interrupt arrives, the used list
148  * is shifted to the end of the list. This gives the appearance of an
149  * infinitely large RX chain so long as the driver doesn't fall behind
150  * the chip and allow all of the lists to be filled up.
151  *
152  * If all the lists are filled, the adapter will deliver an RX 'end of
153  * channel' interrupt when it hits the 0 forward pointer at the end of
154  * the chain. The RXEOC handler then cleans out the RX chain and resets
155  * the list head pointer in the ch_parm register and restarts the receiver.
156  *
157  * For frame transmission, it is possible to program the ThunderLAN's
158  * transmit interrupt threshold so that the chip can acknowledge multiple
159  * lists with only a single TX EOF interrupt. This allows the driver to
160  * queue several frames in one shot, and only have to handle a total of
161  * two interrupts (one TX EOF and one TX EOC) no matter how many frames
162  * are transmitted. Frame transmission is done directly out of the
163  * mbufs passed to the tl_start() routine via the interface send queue.
164  * The driver simply sets up the fragment descriptors in the transmit
165  * lists to point to the mbuf data regions and sends a TX GO command.
166  *
167  * Note that since the RX and TX lists themselves are always used
168  * only by the driver, they are malloc()ed once at driver initialization
169  * time and never free()ed.
170  *
171  * Also, in order to remain as platform independent as possible, this
172  * driver uses memory mapped register access to manipulate the card
173  * as opposed to programmed I/O. This avoids the use of the inb/outb
174  * (and related) instructions which are specific to the i386 platform.
175  *
176  * Using these techniques, this driver achieves very high performance
177  * by minimizing the amount of interrupts generated during large
178  * transfers and by completely avoiding buffer copies. Frame transfer
179  * to and from the ThunderLAN chip is performed entirely by the chip
180  * itself thereby reducing the load on the host CPU.
181  */
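/*
 * The "shift the used list to the end" step described above reduces to
 * three assignments in the RXEOF handler (see tl_intvec_rxeof() below);
 * in condensed form, with 'tail' standing for sc->tl_cdata.tl_rx_tail
 * and 'cur_rx' for the list that was just emptied and given a fresh
 * mbuf:
 *
 *	tail->tl_ptr->tlist_fptr = vtophys(cur_rx->tl_ptr);
 *	tail->tl_next = cur_rx;
 *	tail = cur_rx;
 *
 * Because tl_newbuf() resets the recycled list's own forward pointer
 * to 0, the chain stays terminated at its new end.
 */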
182 
183 #include "bpfilter.h"
184 
185 #include <sys/param.h>
186 #include <sys/systm.h>
187 #include <sys/sockio.h>
188 #include <sys/mbuf.h>
189 #include <sys/malloc.h>
190 #include <sys/kernel.h>
191 #include <sys/socket.h>
192 #include <sys/device.h>
193 #include <sys/timeout.h>
194 
195 #include <net/if.h>
196 
197 #ifdef INET
198 #include <netinet/in.h>
199 #include <netinet/in_systm.h>
200 #include <netinet/in_var.h>
201 #include <netinet/ip.h>
202 #include <netinet/if_ether.h>
203 #endif
204 
205 #include <net/if_dl.h>
206 #include <net/if_media.h>
207 
208 #if NBPFILTER > 0
209 #include <net/bpf.h>
210 #endif
211 
212 #include <uvm/uvm_extern.h>              /* for vtophys */
213 
214 #include <dev/mii/mii.h>
215 #include <dev/mii/miivar.h>
216 
217 #include <dev/pci/pcireg.h>
218 #include <dev/pci/pcivar.h>
219 #include <dev/pci/pcidevs.h>
220 
221 /*
222  * Default to using PIO register access mode to pacify certain
223  * laptop docking stations with built-in ThunderLAN chips that
224  * don't seem to handle memory mapped mode properly.
225  */
226 #define TL_USEIOSPACE
227 
228 #include <dev/pci/if_tlreg.h>
229 #include <dev/mii/tlphyvar.h>
230 
231 const struct tl_products tl_prods[] = {
232 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T },
233 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 },
234 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2 },
235 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
236 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
237 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5 },
238 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 },
239 	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 },
240 	{ PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 },
241 	{ 0, 0, 0 }
242 };
243 
244 int tl_probe(struct device *, void *, void *);
245 void tl_attach(struct device *, struct device *, void *);
246 void tl_wait_up(void *);
247 int tl_intvec_rxeoc(void *, u_int32_t);
248 int tl_intvec_txeoc(void *, u_int32_t);
249 int tl_intvec_txeof(void *, u_int32_t);
250 int tl_intvec_rxeof(void *, u_int32_t);
251 int tl_intvec_adchk(void *, u_int32_t);
252 int tl_intvec_netsts(void *, u_int32_t);
253 
254 int tl_newbuf(struct tl_softc *,
255 					struct tl_chain_onefrag *);
256 void tl_stats_update(void *);
257 int tl_encap(struct tl_softc *, struct tl_chain *,
258 						struct mbuf *);
259 
260 int tl_intr(void *);
261 void tl_start(struct ifnet *);
262 int tl_ioctl(struct ifnet *, u_long, caddr_t);
263 void tl_init(void *);
264 void tl_stop(struct tl_softc *);
265 void tl_watchdog(struct ifnet *);
266 void tl_shutdown(void *);
267 int tl_ifmedia_upd(struct ifnet *);
268 void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
269 
270 u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
271 u_int8_t	tl_eeprom_getbyte(struct tl_softc *,
272 						int, u_int8_t *);
273 int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
274 
275 void tl_mii_sync(struct tl_softc *);
276 void tl_mii_send(struct tl_softc *, u_int32_t, int);
277 int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
278 int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
279 int tl_miibus_readreg(struct device *, int, int);
280 void tl_miibus_writereg(struct device *, int, int, int);
281 void tl_miibus_statchg(struct device *);
282 
283 void tl_setmode(struct tl_softc *, int);
284 int tl_calchash(caddr_t);
285 void tl_setmulti(struct tl_softc *);
286 void tl_setfilt(struct tl_softc *, caddr_t, int);
287 void tl_softreset(struct tl_softc *, int);
288 void tl_hardreset(struct device *);
289 int tl_list_rx_init(struct tl_softc *);
290 int tl_list_tx_init(struct tl_softc *);
291 
292 u_int8_t tl_dio_read8(struct tl_softc *, int);
293 u_int16_t tl_dio_read16(struct tl_softc *, int);
294 u_int32_t tl_dio_read32(struct tl_softc *, int);
295 void tl_dio_write8(struct tl_softc *, int, int);
296 void tl_dio_write16(struct tl_softc *, int, int);
297 void tl_dio_write32(struct tl_softc *, int, int);
298 void tl_dio_setbit(struct tl_softc *, int, int);
299 void tl_dio_clrbit(struct tl_softc *, int, int);
300 void tl_dio_setbit16(struct tl_softc *, int, int);
301 void tl_dio_clrbit16(struct tl_softc *, int, int);
302 
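/*
 * DIO register access helpers. The ThunderLAN's internal registers are
 * reached indirectly: the host writes a register index to TL_DIO_ADDR
 * and then moves data through the TL_DIO_DATA window; the '(reg & 3)'
 * offset picks the right byte lane within that 32-bit window. An
 * auto-increment mode (TL_DIO_ADDR_INC) also exists and is used by the
 * statistics reader further down.
 */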
303 u_int8_t tl_dio_read8(sc, reg)
304 	struct tl_softc		*sc;
305 	int			reg;
306 {
307 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
308 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
309 }
310 
311 u_int16_t tl_dio_read16(sc, reg)
312 	struct tl_softc		*sc;
313 	int			reg;
314 {
315 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
316 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
317 }
318 
319 u_int32_t tl_dio_read32(sc, reg)
320 	struct tl_softc		*sc;
321 	int			reg;
322 {
323 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
324 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
325 }
326 
327 void tl_dio_write8(sc, reg, val)
328 	struct tl_softc		*sc;
329 	int			reg;
330 	int			val;
331 {
332 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
333 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
334 	return;
335 }
336 
337 void tl_dio_write16(sc, reg, val)
338 	struct tl_softc		*sc;
339 	int			reg;
340 	int			val;
341 {
342 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
343 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
344 	return;
345 }
346 
347 void tl_dio_write32(sc, reg, val)
348 	struct tl_softc		*sc;
349 	int			reg;
350 	int			val;
351 {
352 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
353 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
354 	return;
355 }
356 
357 void tl_dio_setbit(sc, reg, bit)
358 	struct tl_softc		*sc;
359 	int			reg;
360 	int			bit;
361 {
362 	u_int8_t			f;
363 
364 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
365 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
366 	f |= bit;
367 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
368 
369 	return;
370 }
371 
372 void tl_dio_clrbit(sc, reg, bit)
373 	struct tl_softc		*sc;
374 	int			reg;
375 	int			bit;
376 {
377 	u_int8_t			f;
378 
379 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
380 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
381 	f &= ~bit;
382 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
383 
384 	return;
385 }
386 
387 void tl_dio_setbit16(sc, reg, bit)
388 	struct tl_softc		*sc;
389 	int			reg;
390 	int			bit;
391 {
392 	u_int16_t			f;
393 
394 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
395 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
396 	f |= bit;
397 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
398 
399 	return;
400 }
401 
402 void tl_dio_clrbit16(sc, reg, bit)
403 	struct tl_softc		*sc;
404 	int			reg;
405 	int			bit;
406 {
407 	u_int16_t			f;
408 
409 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
410 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
411 	f &= ~bit;
412 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
413 
414 	return;
415 }
416 
417 /*
418  * Send an instruction or address to the EEPROM, check for ACK.
419  */
420 u_int8_t tl_eeprom_putbyte(sc, byte)
421 	struct tl_softc		*sc;
422 	int			byte;
423 {
424 	register int		i, ack = 0;
425 
426 	/*
427 	 * Make sure we're in TX mode.
428 	 */
429 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
430 
431 	/*
432 	 * Feed in each bit and strobe the clock.
433 	 */
434 	for (i = 0x80; i; i >>= 1) {
435 		if (byte & i) {
436 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
437 		} else {
438 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
439 		}
440 		DELAY(1);
441 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
442 		DELAY(1);
443 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
444 	}
445 
446 	/*
447 	 * Turn off TX mode.
448 	 */
449 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
450 
451 	/*
452 	 * Check for ack.
453 	 */
454 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
455 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
456 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
457 
458 	return(ack);
459 }
460 
461 /*
462  * Read a byte of data stored in the EEPROM at address 'addr.'
463  */
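/*
 * The sequence below performs a random read: the address is written
 * with a write control byte (a "dummy write"), then a new start
 * condition is issued, the read control byte is sent, and the eight
 * data bits are clocked in. No ACK is sent back for the data byte.
 */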
464 u_int8_t tl_eeprom_getbyte(sc, addr, dest)
465 	struct tl_softc		*sc;
466 	int			addr;
467 	u_int8_t		*dest;
468 {
469 	register int		i;
470 	u_int8_t		byte = 0;
471 
472 	tl_dio_write8(sc, TL_NETSIO, 0);
473 
474 	EEPROM_START;
475 
476 	/*
477 	 * Send write control code to EEPROM.
478 	 */
479 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
480 		printf("%s: failed to send write command, status: %x\n",
481 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
482 		return(1);
483 	}
484 
485 	/*
486 	 * Send address of byte we want to read.
487 	 */
488 	if (tl_eeprom_putbyte(sc, addr)) {
489 		printf("%s: failed to send address, status: %x\n",
490 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
491 		return(1);
492 	}
493 
494 	EEPROM_STOP;
495 	EEPROM_START;
496 	/*
497 	 * Send read control code to EEPROM.
498 	 */
499 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
500 		printf("%s: failed to send read command, status: %x\n",
501 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
502 		return(1);
503 	}
504 
505 	/*
506 	 * Start reading bits from EEPROM.
507 	 */
508 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
509 	for (i = 0x80; i; i >>= 1) {
510 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
511 		DELAY(1);
512 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
513 			byte |= i;
514 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
515 		DELAY(1);
516 	}
517 
518 	EEPROM_STOP;
519 
520 	/*
521 	 * No ACK generated for read, so just return byte.
522 	 */
523 
524 	*dest = byte;
525 
526 	return(0);
527 }
528 
529 /*
530  * Read a sequence of bytes from the EEPROM.
531  */
532 int tl_read_eeprom(sc, dest, off, cnt)
533 	struct tl_softc		*sc;
534 	caddr_t			dest;
535 	int			off;
536 	int			cnt;
537 {
538 	int			err = 0, i;
539 	u_int8_t		byte = 0;
540 
541 	for (i = 0; i < cnt; i++) {
542 		err = tl_eeprom_getbyte(sc, off + i, &byte);
543 		if (err)
544 			break;
545 		*(dest + i) = byte;
546 	}
547 
548 	return(err ? 1 : 0);
549 }
550 
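/*
 * MII bit-bang helpers. tl_mii_sync() idles the management interface
 * by clocking out 32 cycles with the MII transmitter disabled, and
 * tl_mii_send() shifts the low 'cnt' bits of 'bits' out onto the MDIO
 * line, most significant bit first, toggling MCLK around each bit.
 */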
551 void tl_mii_sync(sc)
552 	struct tl_softc		*sc;
553 {
554 	register int		i;
555 
556 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
557 
558 	for (i = 0; i < 32; i++) {
559 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
560 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
561 	}
562 
563 	return;
564 }
565 
566 void tl_mii_send(sc, bits, cnt)
567 	struct tl_softc		*sc;
568 	u_int32_t		bits;
569 	int			cnt;
570 {
571 	int			i;
572 
573 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
574 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
575 		if (bits & i) {
576 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
577 		} else {
578 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
579 		}
580 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
581 	}
582 }
583 
584 int tl_mii_readreg(sc, frame)
585 	struct tl_softc		*sc;
586 	struct tl_mii_frame	*frame;
587 
588 {
589 	int			i, ack, s;
590 	int			minten = 0;
591 
592 	s = splimp();
593 
594 	tl_mii_sync(sc);
595 
596 	/*
597 	 * Set up frame for RX.
598 	 */
599 	frame->mii_stdelim = TL_MII_STARTDELIM;
600 	frame->mii_opcode = TL_MII_READOP;
601 	frame->mii_turnaround = 0;
602 	frame->mii_data = 0;
603 
604 	/*
605 	 * Turn off MII interrupt by forcing MINTEN low.
606 	 */
607 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
608 	if (minten) {
609 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
610 	}
611 
612 	/*
613  	 * Turn on data xmit.
614 	 */
615 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
616 
617 	/*
618 	 * Send command/address info.
619 	 */
620 	tl_mii_send(sc, frame->mii_stdelim, 2);
621 	tl_mii_send(sc, frame->mii_opcode, 2);
622 	tl_mii_send(sc, frame->mii_phyaddr, 5);
623 	tl_mii_send(sc, frame->mii_regaddr, 5);
624 
625 	/*
626 	 * Turn off xmit.
627 	 */
628 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
629 
630 	/* Idle bit */
631 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
632 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
633 
634 	/* Check for ack */
635 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
636 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
637 
638 	/* Complete the cycle */
639 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
640 
641 	/*
642 	 * Now try reading data bits. If the ack failed, we still
643 	 * need to clock through 16 cycles to keep the PHYs in sync.
644 	 */
645 	if (ack) {
646 		for(i = 0; i < 16; i++) {
647 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
648 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
649 		}
650 		goto fail;
651 	}
652 
653 	for (i = 0x8000; i; i >>= 1) {
654 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
655 		if (!ack) {
656 			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
657 				frame->mii_data |= i;
658 		}
659 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
660 	}
661 
662 fail:
663 
664 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
665 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
666 
667 	/* Reenable interrupts */
668 	if (minten) {
669 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
670 	}
671 
672 	splx(s);
673 
674 	if (ack)
675 		return(1);
676 	return(0);
677 }
678 
679 int tl_mii_writereg(sc, frame)
680 	struct tl_softc		*sc;
681 	struct tl_mii_frame	*frame;
682 
683 {
684 	int			s;
685 	int			minten;
686 
687 	tl_mii_sync(sc);
688 
689 	s = splimp();
690 	/*
691 	 * Set up frame for TX.
692 	 */
693 
694 	frame->mii_stdelim = TL_MII_STARTDELIM;
695 	frame->mii_opcode = TL_MII_WRITEOP;
696 	frame->mii_turnaround = TL_MII_TURNAROUND;
697 
698 	/*
699 	 * Turn off MII interrupt by forcing MINTEN low.
700 	 */
701 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
702 	if (minten) {
703 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
704 	}
705 
706 	/*
707  	 * Turn on data output.
708 	 */
709 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
710 
711 	tl_mii_send(sc, frame->mii_stdelim, 2);
712 	tl_mii_send(sc, frame->mii_opcode, 2);
713 	tl_mii_send(sc, frame->mii_phyaddr, 5);
714 	tl_mii_send(sc, frame->mii_regaddr, 5);
715 	tl_mii_send(sc, frame->mii_turnaround, 2);
716 	tl_mii_send(sc, frame->mii_data, 16);
717 
718 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
719 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
720 
721 	/*
722 	 * Turn off xmit.
723 	 */
724 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
725 
726 	/* Reenable interrupts */
727 	if (minten)
728 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
729 
730 	splx(s);
731 
732 	return(0);
733 }
734 
735 int tl_miibus_readreg(dev, phy, reg)
736 	struct device		*dev;
737 	int			phy, reg;
738 {
739 	struct tl_softc *sc = (struct tl_softc *)dev;
740 	struct tl_mii_frame	frame;
741 
742 	bzero((char *)&frame, sizeof(frame));
743 
744 	frame.mii_phyaddr = phy;
745 	frame.mii_regaddr = reg;
746 	tl_mii_readreg(sc, &frame);
747 
748 	return(frame.mii_data);
749 }
750 
751 void tl_miibus_writereg(dev, phy, reg, data)
752 	struct device		*dev;
753 	int			phy, reg, data;
754 {
755 	struct tl_softc *sc = (struct tl_softc *)dev;
756 	struct tl_mii_frame	frame;
757 
758 	bzero((char *)&frame, sizeof(frame));
759 
760 	frame.mii_phyaddr = phy;
761 	frame.mii_regaddr = reg;
762 	frame.mii_data = data;
763 
764 	tl_mii_writereg(sc, &frame);
765 }
766 
767 void tl_miibus_statchg(dev)
768 	struct device *dev;
769 {
770 	struct tl_softc *sc = (struct tl_softc *)dev;
771 
772 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) {
773 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
774 	} else {
775 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
776 	}
777 }
778 
779 /*
780  * Set modes for bitrate devices.
781  */
782 void tl_setmode(sc, media)
783 	struct tl_softc		*sc;
784 	int			media;
785 {
786 	if (IFM_SUBTYPE(media) == IFM_10_5)
787 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
788 	if (IFM_SUBTYPE(media) == IFM_10_T) {
789 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
790 		if ((media & IFM_GMASK) == IFM_FDX) {
791 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
792 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
793 		} else {
794 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
795 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
796 		}
797 	}
798 }
799 
800 /*
801  * Calculate the hash of a MAC address for programming the multicast hash
802  * table.  This hash is simply the address split into 6-bit chunks
803  * XOR'd, e.g.
804  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
805  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
806  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
807  * the folded 24-bit value is split into 6-bit portions and XOR'd.
808  */
809 int tl_calchash(addr)
810 	caddr_t			addr;
811 {
812 	int			t;
813 
814 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
815 		(addr[2] ^ addr[5]);
816 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
817 }
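/*
 * Two worked examples: for the broadcast address ff:ff:ff:ff:ff:ff the
 * two halves cancel when folded (t == 0), giving hash bit 0. For the
 * all-hosts group address 01:00:5e:00:00:01 (224.0.0.1), t == 0x01005f
 * and XORing its 6-bit chunks gives hash bit 14.
 */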
818 
819 /*
820  * The ThunderLAN has a perfect MAC address filter in addition to
821  * the multicast hash filter. The perfect filter can be programmed
822  * with up to four MAC addresses. The first one is always used to
823  * hold the station address, which leaves us free to use the other
824  * three for multicast addresses.
825  */
826 void tl_setfilt(sc, addr, slot)
827 	struct tl_softc		*sc;
828 	caddr_t			addr;
829 	int			slot;
830 {
831 	int			i;
832 	u_int16_t		regaddr;
833 
834 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
835 
836 	for (i = 0; i < ETHER_ADDR_LEN; i++)
837 		tl_dio_write8(sc, regaddr + i, *(addr + i));
838 
839 	return;
840 }
841 
842 /*
843  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
844  * linked list. This is fine, except addresses are added from the head
845  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
846  * group to always be in the perfect filter, but as more groups are added,
847  * the 224.0.0.1 entry (which is always added first) gets pushed down
848  * the list and ends up at the tail. So after 3 or 4 multicast groups
849  * are added, the all-hosts entry gets pushed out of the perfect filter
850  * and into the hash table.
851  *
852  * Because the multicast list is a doubly-linked list as opposed to a
853  * circular queue, we don't have the ability to just grab the tail of
854  * the list and traverse it backwards. Instead, we have to traverse
855  * the list once to find the tail, then traverse it again backwards to
856  * update the multicast filter.
857  */
858 void tl_setmulti(sc)
859 	struct tl_softc		*sc;
860 {
861 	struct ifnet		*ifp;
862 	u_int32_t		hashes[2] = { 0, 0 };
863 	int			h;
864 	struct arpcom *ac = &sc->arpcom;
865 	struct ether_multistep step;
866 	struct ether_multi *enm;
867 	ifp = &sc->arpcom.ac_if;
868 
869 	tl_dio_write32(sc, TL_HASH1, 0);
870 	tl_dio_write32(sc, TL_HASH2, 0);
871 
872 	ifp->if_flags &= ~IFF_ALLMULTI;
873 #if 0
874 	ETHER_FIRST_MULTI(step, ac, enm);
875 	while (enm != NULL) {
876 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
877 			h = tl_calchash(enm->enm_addrlo);
878 			hashes[h/32] |= (1 << (h % 32));
879 		} else {
880 			hashes[0] = hashes[1] = 0xffffffff;
881 			ifp->if_flags |= IFF_ALLMULTI;
882 			break;
883 		}
884 		ETHER_NEXT_MULTI(step, enm);
885 	}
886 #else
887 	ETHER_FIRST_MULTI(step, ac, enm);
888 	h = 0;
889 	while (enm != NULL) {
890 		h++;
891 		ETHER_NEXT_MULTI(step, enm);
892 	}
893 	if (h) {
894 		hashes[0] = hashes[1] = 0xffffffff;
895 		ifp->if_flags |= IFF_ALLMULTI;
896 	} else {
897 		hashes[0] = hashes[1] = 0x00000000;
898 	}
899 #endif
900 
901 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
902 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
903 
904 	return;
905 }
906 
907 /*
908  * This routine is recommended by the ThunderLAN manual to ensure that
909  * the internal PHY is powered up correctly. It also recommends a one
910  * second pause at the end to 'wait for the clocks to start' but in my
911  * experience this isn't necessary.
912  */
913 void tl_hardreset(dev)
914 	struct device *dev;
915 {
916 	struct tl_softc		*sc = (struct tl_softc *)dev;
917 	int			i;
918 	u_int16_t		flags;
919 
920 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
921 
922 	for (i = 0; i < MII_NPHY; i++)
923 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
924 
925 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
926 	tl_mii_sync(sc);
927 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
928 
929 	DELAY(5000);
930 	return;
931 }
932 
933 void tl_softreset(sc, internal)
934 	struct tl_softc		*sc;
935 	int			internal;
936 {
937         u_int32_t               cmd, dummy, i;
938 
939         /* Assert the adapter reset bit. */
940 	CMD_SET(sc, TL_CMD_ADRST);
941         /* Turn off interrupts */
942 	CMD_SET(sc, TL_CMD_INTSOFF);
943 
944 	/* First, clear the stats registers. */
945 	for (i = 0; i < 5; i++)
946 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
947 
948         /* Clear Areg and Hash registers */
949 	for (i = 0; i < 8; i++)
950 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
951 
952         /*
953 	 * Set up Netconfig register. Enable one channel and
954 	 * one fragment mode.
955 	 */
956 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
957 	if (internal && !sc->tl_bitrate) {
958 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
959 	} else {
960 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
961 	}
962 
963 	/* Handle cards with bitrate devices. */
964 	if (sc->tl_bitrate)
965 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
966 
967 	/*
968 	 * Load adapter irq pacing timer and tx threshold.
969 	 * We make the transmit threshold 1 initially but we may
970 	 * change that later.
971 	 */
972 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
973 	cmd |= TL_CMD_NES;
974 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
975 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
976 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
977 
978         /* Unreset the MII */
979 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
980 
981 	/* Take the adapter out of reset */
982 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
983 
984 	/* Wait for things to settle down a little. */
985 	DELAY(500);
986 
987         return;
988 }
989 
990 /*
991  * Initialize the transmit lists.
992  */
993 int tl_list_tx_init(sc)
994 	struct tl_softc		*sc;
995 {
996 	struct tl_chain_data	*cd;
997 	struct tl_list_data	*ld;
998 	int			i;
999 
1000 	cd = &sc->tl_cdata;
1001 	ld = sc->tl_ldata;
1002 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1003 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1004 		if (i == (TL_TX_LIST_CNT - 1))
1005 			cd->tl_tx_chain[i].tl_next = NULL;
1006 		else
1007 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1008 	}
1009 
1010 	cd->tl_tx_free = &cd->tl_tx_chain[0];
1011 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1012 	sc->tl_txeoc = 1;
1013 
1014 	return(0);
1015 }
1016 
1017 /*
1018  * Initialize the RX lists and allocate mbufs for them.
1019  */
1020 int tl_list_rx_init(sc)
1021 	struct tl_softc		*sc;
1022 {
1023 	struct tl_chain_data	*cd;
1024 	struct tl_list_data	*ld;
1025 	int			i;
1026 
1027 	cd = &sc->tl_cdata;
1028 	ld = sc->tl_ldata;
1029 
1030 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1031 		cd->tl_rx_chain[i].tl_ptr =
1032 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1033 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1034 			return(ENOBUFS);
1035 		if (i == (TL_RX_LIST_CNT - 1)) {
1036 			cd->tl_rx_chain[i].tl_next = NULL;
1037 			ld->tl_rx_list[i].tlist_fptr = 0;
1038 		} else {
1039 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1040 			ld->tl_rx_list[i].tlist_fptr =
1041 					vtophys(&ld->tl_rx_list[i + 1]);
1042 		}
1043 	}
1044 
1045 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1046 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1047 
1048 	return(0);
1049 }
1050 
1051 int tl_newbuf(sc, c)
1052 	struct tl_softc		*sc;
1053 	struct tl_chain_onefrag	*c;
1054 {
1055 	struct mbuf		*m_new = NULL;
1056 
1057 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1058 	if (m_new == NULL) {
1059 		return(ENOBUFS);
1060 	}
1061 
1062 	MCLGET(m_new, M_DONTWAIT);
1063 	if (!(m_new->m_flags & M_EXT)) {
1064 		m_freem(m_new);
1065 		return(ENOBUFS);
1066 	}
1067 
1068 #ifdef __alpha__
1069 	m_new->m_data += 2;
1070 #endif
1071 
1072 	c->tl_mbuf = m_new;
1073 	c->tl_next = NULL;
1074 	c->tl_ptr->tlist_frsize = MCLBYTES;
1075 	c->tl_ptr->tlist_fptr = 0;
1076 	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1077 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1078 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1079 
1080 	return(0);
1081 }
1082 /*
1083  * Interrupt handler for RX 'end of frame' condition (EOF). This
1084  * tells us that a full ethernet frame has been captured and we need
1085  * to handle it.
1086  *
1087  * Reception is done using 'lists' which consist of a header and a
1088  * series of 10 data count/data address pairs that point to buffers.
1089  * Initially you're supposed to create a list, populate it with pointers
1090  * to buffers, then load the physical address of the list into the
1091  * ch_parm register. The adapter is then supposed to DMA the received
1092  * frame into the buffers for you.
1093  *
1094  * To make things as fast as possible, we have the chip DMA directly
1095  * into mbufs. This saves us from having to do a buffer copy: we can
1096  * just hand the mbufs directly to ether_input(). Once the frame has
1097  * been sent on its way, the 'list' structure is assigned a new buffer
1098  * and moved to the end of the RX chain. As long as we stay ahead of
1099  * the chip, it will always think it has an endless receive channel.
1100  *
1101  * If we happen to fall behind and the chip manages to fill up all of
1102  * the buffers, it will generate an end of channel interrupt and wait
1103  * for us to empty the chain and restart the receiver.
1104  */
1105 int tl_intvec_rxeof(xsc, type)
1106 	void			*xsc;
1107 	u_int32_t		type;
1108 {
1109 	struct tl_softc		*sc;
1110 	int			r = 0, total_len = 0;
1111 	struct ether_header	*eh;
1112 	struct mbuf		*m;
1113 	struct ifnet		*ifp;
1114 	struct tl_chain_onefrag	*cur_rx;
1115 
1116 	sc = xsc;
1117 	ifp = &sc->arpcom.ac_if;
1118 
1119 	while(sc->tl_cdata.tl_rx_head != NULL) {
1120 		cur_rx = sc->tl_cdata.tl_rx_head;
1121 		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1122 			break;
1123 		r++;
1124 		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1125 		m = cur_rx->tl_mbuf;
1126 		total_len = cur_rx->tl_ptr->tlist_frsize;
1127 
1128 		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1129 			ifp->if_ierrors++;
1130 			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1131 			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1132 			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1133 			continue;
1134 		}
1135 
1136 		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1137 						vtophys(cur_rx->tl_ptr);
1138 		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1139 		sc->tl_cdata.tl_rx_tail = cur_rx;
1140 
1141 		eh = mtod(m, struct ether_header *);
1142 		m->m_pkthdr.rcvif = ifp;
1143 
1144 		/*
1145 		 * Note: when the ThunderLAN chip is in 'capture all
1146 		 * frames' mode, it will receive its own transmissions.
1147 		 * We don't need to process our own transmissions,
1148 		 * so we drop them here and continue.
1149 		 */
1150 		/*if (ifp->if_flags & IFF_PROMISC && */
1151 		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
1152 		 					ETHER_ADDR_LEN)) {
1153 				m_freem(m);
1154 				continue;
1155 		}
1156 
1157 		m->m_pkthdr.len = m->m_len = total_len;
1158 #if NBPFILTER > 0
1159 		/*
1160 		 * Handle BPF listeners: hand a copy of the packet to any
1161 		 * attached BPF listener before it is passed up to the
1162 		 * ether_input() layer.
1163 		 */
1167 		if (ifp->if_bpf) {
1168 			bpf_mtap(ifp->if_bpf, m);
1169 		}
1170 #endif
1171 		/* pass it on. */
1172 		ether_input_mbuf(ifp, m);
1173 	}
1174 
1175 	return(r);
1176 }
1177 
1178 /*
1179  * The RX-EOC condition hits when the ch_parm address hasn't been
1180  * initialized or the adapter reached a list with a forward pointer
1181  * of 0 (which indicates the end of the chain). In our case, this means
1182  * the card has hit the end of the receive buffer chain and we need to
1183  * empty out the buffers and shift the pointer back to the beginning again.
1184  */
1185 int tl_intvec_rxeoc(xsc, type)
1186 	void			*xsc;
1187 	u_int32_t		type;
1188 {
1189 	struct tl_softc		*sc;
1190 	int			r;
1191 	struct tl_chain_data	*cd;
1192 
1193 	sc = xsc;
1194 	cd = &sc->tl_cdata;
1195 
1196 	/* Flush out the receive queue and ack RXEOF interrupts. */
1197 	r = tl_intvec_rxeof(xsc, type);
1198 	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1199 	r = 1;
1200 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1201 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1202 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1203 	r |= (TL_CMD_GO|TL_CMD_RT);
1204 	return(r);
1205 }
1206 
1207 int tl_intvec_txeof(xsc, type)
1208 	void			*xsc;
1209 	u_int32_t		type;
1210 {
1211 	struct tl_softc		*sc;
1212 	int			r = 0;
1213 	struct tl_chain		*cur_tx;
1214 
1215 	sc = xsc;
1216 
1217 	/*
1218 	 * Go through our tx list and free mbufs for those
1219 	 * frames that have been sent.
1220 	 */
1221 	while (sc->tl_cdata.tl_tx_head != NULL) {
1222 		cur_tx = sc->tl_cdata.tl_tx_head;
1223 		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1224 			break;
1225 		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1226 
1227 		r++;
1228 		m_freem(cur_tx->tl_mbuf);
1229 		cur_tx->tl_mbuf = NULL;
1230 
1231 		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1232 		sc->tl_cdata.tl_tx_free = cur_tx;
1233 		if (!cur_tx->tl_ptr->tlist_fptr)
1234 			break;
1235 	}
1236 
1237 	return(r);
1238 }
1239 
1240 /*
1241  * The transmit end of channel interrupt. The adapter triggers this
1242  * interrupt to tell us it hit the end of the current transmit list.
1243  *
1244  * A note about this: it's possible for a condition to arise where
1245  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1246  * You have to avoid this since the chip expects things to go in a
1247  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1248  * When the TXEOF handler is called, it will free all of the transmitted
1249  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1250  * interrupt should be received and acknowledged before any more frames
1251  * are queued for transmission. If tl_start() is called after TXEOF
1252  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1253  * it could attempt to issue a transmit command prematurely.
1254  *
1255  * To guard against this, tl_start() will only issue transmit commands
1256  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1257  * can set this flag once tl_start() has cleared it.
1258  */
1259 int tl_intvec_txeoc(xsc, type)
1260 	void			*xsc;
1261 	u_int32_t		type;
1262 {
1263 	struct tl_softc		*sc;
1264 	struct ifnet		*ifp;
1265 	u_int32_t		cmd;
1266 
1267 	sc = xsc;
1268 	ifp = &sc->arpcom.ac_if;
1269 
1270 	/* Clear the timeout timer. */
1271 	ifp->if_timer = 0;
1272 
1273 	if (sc->tl_cdata.tl_tx_head == NULL) {
1274 		ifp->if_flags &= ~IFF_OACTIVE;
1275 		sc->tl_cdata.tl_tx_tail = NULL;
1276 		sc->tl_txeoc = 1;
1277 	} else {
1278 		sc->tl_txeoc = 0;
1279 		/* First we have to ack the EOC interrupt. */
1280 		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1281 		/* Then load the address of the next TX list. */
1282 		CSR_WRITE_4(sc, TL_CH_PARM,
1283 		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1284 		/* Restart TX channel. */
1285 		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1286 		cmd &= ~TL_CMD_RT;
1287 		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1288 		CMD_PUT(sc, cmd);
1289 		return(0);
1290 	}
1291 
1292 	return(1);
1293 }
1294 
1295 int tl_intvec_adchk(xsc, type)
1296 	void			*xsc;
1297 	u_int32_t		type;
1298 {
1299 	struct tl_softc		*sc;
1300 
1301 	sc = xsc;
1302 
1303 	if (type)
1304 		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
1305 			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1306 
1307 	tl_softreset(sc, 1);
1308 	tl_stop(sc);
1309 	tl_init(sc);
1310 	CMD_SET(sc, TL_CMD_INTSON);
1311 
1312 	return(0);
1313 }
1314 
1315 int tl_intvec_netsts(xsc, type)
1316 	void			*xsc;
1317 	u_int32_t		type;
1318 {
1319 	struct tl_softc		*sc;
1320 	u_int16_t		netsts;
1321 
1322 	sc = xsc;
1323 
1324 	netsts = tl_dio_read16(sc, TL_NETSTS);
1325 	tl_dio_write16(sc, TL_NETSTS, netsts);
1326 
1327 	printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);
1328 
1329 	return(1);
1330 }
1331 
1332 int tl_intr(xsc)
1333 	void			*xsc;
1334 {
1335 	struct tl_softc		*sc;
1336 	struct ifnet		*ifp;
1337 	int			r = 0;
1338 	u_int32_t		type = 0;
1339 	u_int16_t		ints = 0;
1340 	u_int8_t		ivec = 0;
1341 
1342 	sc = xsc;
1343 
1344 	/* Disable interrupts */
1345 	ints = CSR_READ_2(sc, TL_HOST_INT);
1346 	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1347 	type = (ints << 16) & 0xFFFF0000;
1348 	ivec = (ints & TL_VEC_MASK) >> 5;
1349 	ints = (ints & TL_INT_MASK) >> 2;
1350 
1351 	ifp = &sc->arpcom.ac_if;
1352 
1353 	switch(ints) {
1354 	case (TL_INTR_INVALID):
1355 		/* Re-enable interrupts but don't ack this one. */
1356 		CMD_PUT(sc, type);
1357 		r = 0;
1358 		break;
1359 	case (TL_INTR_TXEOF):
1360 		r = tl_intvec_txeof((void *)sc, type);
1361 		break;
1362 	case (TL_INTR_TXEOC):
1363 		r = tl_intvec_txeoc((void *)sc, type);
1364 		break;
1365 	case (TL_INTR_STATOFLOW):
1366 		tl_stats_update(sc);
1367 		r = 1;
1368 		break;
1369 	case (TL_INTR_RXEOF):
1370 		r = tl_intvec_rxeof((void *)sc, type);
1371 		break;
1372 	case (TL_INTR_DUMMY):
1373 		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
1374 		r = 1;
1375 		break;
1376 	case (TL_INTR_ADCHK):
1377 		if (ivec)
1378 			r = tl_intvec_adchk((void *)sc, type);
1379 		else
1380 			r = tl_intvec_netsts((void *)sc, type);
1381 		break;
1382 	case (TL_INTR_RXEOC):
1383 		r = tl_intvec_rxeoc((void *)sc, type);
1384 		break;
1385 	default:
1386 		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
1387 		break;
1388 	}
1389 
1390 	/* Re-enable interrupts */
1391 	if (r) {
1392 		CMD_PUT(sc, TL_CMD_ACK | r | type);
1393 	}
1394 
1395 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1396 		tl_start(ifp);
1397 
1398 	return r;
1399 }
1400 
1401 void tl_stats_update(xsc)
1402 	void			*xsc;
1403 {
1404 	struct tl_softc		*sc;
1405 	struct ifnet		*ifp;
1406 	struct tl_stats		tl_stats;
1407 	u_int32_t		*p;
1408 	int			s;
1409 
1410 	s = splimp();
1411 
1412 	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1413 
1414 	sc = xsc;
1415 	ifp = &sc->arpcom.ac_if;
1416 
1417 	p = (u_int32_t *)&tl_stats;
1418 
1419 	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1420 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1421 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1422 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1423 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1424 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1425 
1426 	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1427 	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1428 				tl_stats.tl_tx_multi_collision;
1429 	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1430 	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1431 			    tl_rx_overrun(tl_stats);
1432 	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1433 
1434 	if (tl_tx_underrun(tl_stats)) {
1435 		u_int8_t	tx_thresh;
1436 		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1437 		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1438 			tx_thresh >>= 4;
1439 			tx_thresh++;
1440 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1441 			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1442 		}
1443 	}
1444 
1445 	timeout_add(&sc->tl_stats_tmo, hz);
1446 
1447 	if (!sc->tl_bitrate)
1448 		mii_tick(&sc->sc_mii);
1449 
1450 	splx(s);
1451 	return;
1452 }
1453 
1454 /*
1455  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1456  * pointers to the fragment pointers.
1457  */
1458 int tl_encap(sc, c, m_head)
1459 	struct tl_softc		*sc;
1460 	struct tl_chain		*c;
1461 	struct mbuf		*m_head;
1462 {
1463 	int			frag = 0;
1464 	struct tl_frag		*f = NULL;
1465 	int			total_len;
1466 	struct mbuf		*m;
1467 
1468 	/*
1469  	 * Start packing the mbufs in this chain into
1470 	 * the fragment pointers. Stop when we run out
1471  	 * of fragments or hit the end of the mbuf chain.
1472 	 */
1473 	m = m_head;
1474 	total_len = 0;
1475 
1476 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1477 		if (m->m_len != 0) {
1478 			if (frag == TL_MAXFRAGS)
1479 				break;
1480 			total_len+= m->m_len;
1481 			c->tl_ptr->tl_frag[frag].tlist_dadr =
1482 				vtophys(mtod(m, vaddr_t));
1483 			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1484 			frag++;
1485 		}
1486 	}
1487 
1488 	/*
1489 	 * Handle special cases.
1490 	 * Special case #1: we used up all 10 fragments, but
1491 	 * we have more mbufs left in the chain. Copy the
1492 	 * data into an mbuf cluster. Note that we don't
1493 	 * bother clearing the values in the other fragment
1494 	 * pointers/counters; it wouldn't gain us anything,
1495 	 * and would waste cycles.
1496 	 */
1497 	if (m != NULL) {
1498 		struct mbuf		*m_new = NULL;
1499 
1500 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1501 		if (m_new == NULL) {
1502 			return(1);
1503 		}
1504 		if (m_head->m_pkthdr.len > MHLEN) {
1505 			MCLGET(m_new, M_DONTWAIT);
1506 			if (!(m_new->m_flags & M_EXT)) {
1507 				m_freem(m_new);
1508 				return(1);
1509 			}
1510 		}
1511 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1512 					mtod(m_new, caddr_t));
1513 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1514 		m_freem(m_head);
1515 		m_head = m_new;
1516 		f = &c->tl_ptr->tl_frag[0];
1517 		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1518 		f->tlist_dcnt = total_len = m_new->m_len;
1519 		frag = 1;
1520 	}
1521 
1522 	/*
1523 	 * Special case #2: the frame is smaller than the minimum
1524 	 * frame size. We have to pad it to make the chip happy.
1525 	 */
1526 	if (total_len < TL_MIN_FRAMELEN) {
1527 		f = &c->tl_ptr->tl_frag[frag];
1528 		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1529 		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1530 		total_len += f->tlist_dcnt;
1531 		frag++;
1532 	}
1533 
1534 	c->tl_mbuf = m_head;
1535 	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1536 	c->tl_ptr->tlist_frsize = total_len;
1537 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1538 	c->tl_ptr->tlist_fptr = 0;
1539 
1540 	return(0);
1541 }
1542 
1543 /*
1544  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1545  * to the mbuf data regions directly in the transmit lists. We also save a
1546  * copy of the pointers since the transmit list fragment pointers are
1547  * physical addresses.
1548  */
1549 void tl_start(ifp)
1550 	struct ifnet		*ifp;
1551 {
1552 	struct tl_softc		*sc;
1553 	struct mbuf		*m_head = NULL;
1554 	u_int32_t		cmd;
1555 	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1556 
1557 	sc = ifp->if_softc;
1558 
1559 	/*
1560 	 * Check for an available queue slot. If there are none,
1561 	 * punt.
1562 	 */
1563 	if (sc->tl_cdata.tl_tx_free == NULL) {
1564 		ifp->if_flags |= IFF_OACTIVE;
1565 		return;
1566 	}
1567 
1568 	start_tx = sc->tl_cdata.tl_tx_free;
1569 
1570 	while(sc->tl_cdata.tl_tx_free != NULL) {
1571 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1572 		if (m_head == NULL)
1573 			break;
1574 
1575 		/* Pick a chain member off the free list. */
1576 		cur_tx = sc->tl_cdata.tl_tx_free;
1577 		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1578 
1579 		cur_tx->tl_next = NULL;
1580 
1581 		/* Pack the data into the list. */
1582 		tl_encap(sc, cur_tx, m_head);
1583 
1584 		/* Chain it together */
1585 		if (prev != NULL) {
1586 			prev->tl_next = cur_tx;
1587 			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1588 		}
1589 		prev = cur_tx;
1590 
1591 		/*
1592 		 * If there's a BPF listener, bounce a copy of this frame
1593 		 * to him.
1594 		 */
1595 #if NBPFILTER > 0
1596 		if (ifp->if_bpf)
1597 			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf);
1598 #endif
1599 	}
1600 
1601 	/*
1602 	 * If there are no packets queued, bail.
1603 	 */
1604 	if (cur_tx == NULL)
1605 		return;
1606 
1607 	/*
1608 	 * That's all we can stands, we can't stands no more.
1609 	 * If there are no other transfers pending, then issue the
1610 	 * TX GO command to the adapter to start things moving.
1611 	 * Otherwise, just leave the data in the queue and let
1612 	 * the EOF/EOC interrupt handler send.
1613 	 */
1614 	if (sc->tl_cdata.tl_tx_head == NULL) {
1615 		sc->tl_cdata.tl_tx_head = start_tx;
1616 		sc->tl_cdata.tl_tx_tail = cur_tx;
1617 
1618 		if (sc->tl_txeoc) {
1619 			sc->tl_txeoc = 0;
1620 			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
1621 			cmd = CSR_READ_4(sc, TL_HOSTCMD);
1622 			cmd &= ~TL_CMD_RT;
1623 			cmd |= TL_CMD_GO|TL_CMD_INTSON;
1624 			CMD_PUT(sc, cmd);
1625 		}
1626 	} else {
1627 		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
1628 		sc->tl_cdata.tl_tx_tail = cur_tx;
1629 	}
1630 
1631 	/*
1632 	 * Set a timeout in case the chip goes out to lunch.
1633 	 */
1634 	ifp->if_timer = 10;
1635 
1636 	return;
1637 }
1638 
1639 void tl_init(xsc)
1640 	void			*xsc;
1641 {
1642 	struct tl_softc		*sc = xsc;
1643 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1644         int			s;
1645 
1646 	s = splimp();
1647 
1648 	ifp = &sc->arpcom.ac_if;
1649 
1650 	/*
1651 	 * Cancel pending I/O.
1652 	 */
1653 	tl_stop(sc);
1654 
1655 	/* Initialize TX FIFO threshold */
1656 	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1657 	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
1658 
1659 	/* Set PCI burst size */
1660 	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
1661 
1662 	/*
1663 	 * Set 'capture all frames' bit for promiscuous mode.
1664 	 */
1665 	if (ifp->if_flags & IFF_PROMISC)
1666 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
1667 	else
1668 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
1669 
1670 	/*
1671 	 * Set capture broadcast bit to capture broadcast frames.
1672 	 */
1673 	if (ifp->if_flags & IFF_BROADCAST)
1674 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
1675 	else
1676 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
1677 
1678 	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
1679 
1680 	/* Init our MAC address */
1681 	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
1682 
1683 	/* Init multicast filter, if needed. */
1684 	tl_setmulti(sc);
1685 
1686 	/* Init circular RX list. */
1687 	if (tl_list_rx_init(sc) == ENOBUFS) {
1688 		printf("%s: initialization failed: no memory for rx buffers\n",
1689 			sc->sc_dev.dv_xname);
1690 		tl_stop(sc);
1691 		splx(s);
1692 		return;
1693 	}
1694 
1695 	/* Init TX pointers. */
1696 	tl_list_tx_init(sc);
1697 
1698 	/* Enable PCI interrupts. */
1699 	CMD_SET(sc, TL_CMD_INTSON);
1700 
1701 	/* Load the address of the rx list */
1702 	CMD_SET(sc, TL_CMD_RT);
1703 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
1704 
1705 	if (!sc->tl_bitrate)
1706 		mii_mediachg(&sc->sc_mii);
1707 
1708 	/* Send the RX go command */
1709 	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
1710 
1711 	splx(s);
1712 
1713 	/* Start the stats update counter */
1714 	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
1715 	timeout_add(&sc->tl_stats_tmo, hz);
1716 	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
1717 	timeout_add(&sc->tl_wait_tmo, 2 * hz);
1718 
1719 	return;
1720 }
1721 
1722 /*
1723  * Set media options.
1724  */
1725 int
1726 tl_ifmedia_upd(ifp)
1727 	struct ifnet *ifp;
1728 {
1729 	struct tl_softc *sc = ifp->if_softc;
1730 
1731 	if (sc->tl_bitrate)
1732 		tl_setmode(sc, sc->ifmedia.ifm_media);
1733 	else
1734 		mii_mediachg(&sc->sc_mii);
1735 
1736 	return(0);
1737 }
1738 
1739 /*
1740  * Report current media status.
1741  */
1742 void tl_ifmedia_sts(ifp, ifmr)
1743 	struct ifnet		*ifp;
1744 	struct ifmediareq	*ifmr;
1745 {
1746 	struct tl_softc		*sc;
1747 	struct mii_data		*mii;
1748 
1749 	sc = ifp->if_softc;
1750 	mii = &sc->sc_mii;
1751 
1752 	ifmr->ifm_active = IFM_ETHER;
1753 	if (sc->tl_bitrate) {
1754 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
1755 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
1756 		else
1757 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1758 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
1759 			ifmr->ifm_active |= IFM_HDX;
1760 		else
1761 			ifmr->ifm_active |= IFM_FDX;
1762 		return;
1763 	} else {
1764 		mii_pollstat(mii);
1765 		ifmr->ifm_active = mii->mii_media_active;
1766 		ifmr->ifm_status = mii->mii_media_status;
1767 	}
1768 
1769 	return;
1770 }
1771 
1772 int tl_ioctl(ifp, command, data)
1773 	struct ifnet		*ifp;
1774 	u_long			command;
1775 	caddr_t			data;
1776 {
1777 	struct tl_softc		*sc = ifp->if_softc;
1778 	struct ifreq		*ifr = (struct ifreq *) data;
1779 	struct ifaddr *ifa = (struct ifaddr *)data;
1780 	int			s, error = 0;
1781 
1782 	s = splimp();
1783 
1784 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
1785 		splx(s);
1786 		return error;
1787 	}
1788 
1789 	switch(command) {
1790 	case SIOCSIFADDR:
1791 		ifp->if_flags |= IFF_UP;
1792 		switch (ifa->ifa_addr->sa_family) {
1793 #ifdef INET
1794 		case AF_INET:
1795 			tl_init(sc);
1796 			arp_ifinit(&sc->arpcom, ifa);
1797 			break;
1798 #endif /* INET */
1799 		default:
1800 			tl_init(sc);
1801 			break;
1802 		}
		break;
1803 	case SIOCSIFFLAGS:
1804 		if (ifp->if_flags & IFF_UP) {
1805 			if (ifp->if_flags & IFF_RUNNING &&
1806 			    ifp->if_flags & IFF_PROMISC &&
1807 			    !(sc->tl_if_flags & IFF_PROMISC)) {
1808 				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
1809 				tl_setmulti(sc);
1810 			} else if (ifp->if_flags & IFF_RUNNING &&
1811 			    !(ifp->if_flags & IFF_PROMISC) &&
1812 			    sc->tl_if_flags & IFF_PROMISC) {
1813 				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
1814 				tl_setmulti(sc);
1815 			} else
1816 				tl_init(sc);
1817 		} else {
1818 			if (ifp->if_flags & IFF_RUNNING) {
1819 				tl_stop(sc);
1820 			}
1821 		}
1822 		sc->tl_if_flags = ifp->if_flags;
1823 		error = 0;
1824 		break;
1825 	case SIOCADDMULTI:
1826 	case SIOCDELMULTI:
1827 		error = (command == SIOCADDMULTI) ?
1828 		    ether_addmulti(ifr, &sc->arpcom) :
1829 		    ether_delmulti(ifr, &sc->arpcom);
1830 
1831 		if (error == ENETRESET) {
1832 			/*
1833 			 * Multicast list has changed; set the hardware
1834 			 * filter accordingly.
1835 			 */
1836 			tl_setmulti(sc);
1837 			error = 0;
1838 		}
1839 		break;
1840 	case SIOCSIFMEDIA:
1841 	case SIOCGIFMEDIA:
1842 		if (sc->tl_bitrate)
1843 			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1844 		else
1845 			error = ifmedia_ioctl(ifp, ifr,
1846 			    &sc->sc_mii.mii_media, command);
1847 		break;
1848 	default:
1849 		error = EINVAL;
1850 		break;
1851 	}
1852 
1853 	splx(s);
1854 
1855 	return(error);
1856 }
1857 
1858 void tl_watchdog(ifp)
1859 	struct ifnet		*ifp;
1860 {
1861 	struct tl_softc		*sc;
1862 
1863 	sc = ifp->if_softc;
1864 
1865 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1866 
1867 	ifp->if_oerrors++;
1868 
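	/* The transmitter appears to have wedged; reset and reinitialize. */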
1869 	tl_softreset(sc, 1);
1870 	tl_init(sc);
1871 
1872 	return;
1873 }
1874 
1875 /*
1876  * Stop the adapter and free any mbufs allocated to the
1877  * RX and TX lists.
1878  */
1879 void tl_stop(sc)
1880 	struct tl_softc		*sc;
1881 {
1882 	register int		i;
1883 	struct ifnet		*ifp;
1884 
1885 	ifp = &sc->arpcom.ac_if;
1886 
1887 	/* Stop the stats updater. */
1888 	timeout_del(&sc->tl_stats_tmo);
1889 	timeout_del(&sc->tl_wait_tmo);
1890 
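	/*
	 * The STOP command acts on one DMA channel at a time; the RT
	 * (receive/transmit select) bit in the host command register picks
	 * which one, so it is cleared for the transmit channel and set for
	 * the receive channel below. TL_CH_PARM is zeroed to drop each
	 * channel's list pointer.
	 */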
1891 	/* Stop the transmitter */
1892 	CMD_CLR(sc, TL_CMD_RT);
1893 	CMD_SET(sc, TL_CMD_STOP);
1894 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1895 
1896 	/* Stop the receiver */
1897 	CMD_SET(sc, TL_CMD_RT);
1898 	CMD_SET(sc, TL_CMD_STOP);
1899 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1900 
1901 	/*
1902 	 * Disable host interrupts.
1903 	 */
1904 	CMD_SET(sc, TL_CMD_INTSOFF);
1905 
1906 	/*
1907 	 * Clear list pointer.
1908 	 */
1909 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
1910 
1911 	/*
1912 	 * Free the RX lists.
1913 	 */
1914 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1915 		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
1916 			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
1917 			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
1918 		}
1919 	}
1920 	bzero((char *)&sc->tl_ldata->tl_rx_list,
1921 		sizeof(sc->tl_ldata->tl_rx_list));
1922 
1923 	/*
1924 	 * Free the TX list buffers.
1925 	 */
1926 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1927 		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
1928 			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
1929 			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
1930 		}
1931 	}
1932 	bzero((char *)&sc->tl_ldata->tl_tx_list,
1933 		sizeof(sc->tl_ldata->tl_tx_list));
1934 
1935 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1936 
1937 	return;
1938 }
1939 
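/*
 * Match the TI ThunderLAN itself along with the Compaq and Olicom
 * OEM boards built around it.
 */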
1940 int
1941 tl_probe(parent, match, aux)
1942 	struct device *parent;
1943 	void *match;
1944 	void *aux;
1945 {
1946 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1947 
1948 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) {
1949 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN)
1950 			return 1;
1951 		return 0;
1952 	}
1953 
1954 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) {
1955 		switch (PCI_PRODUCT(pa->pa_id)) {
1956 		case PCI_PRODUCT_COMPAQ_N100TX:
1957 		case PCI_PRODUCT_COMPAQ_N10T:
1958 		case PCI_PRODUCT_COMPAQ_IntNF3P:
1959 		case PCI_PRODUCT_COMPAQ_DPNet100TX:
1960 		case PCI_PRODUCT_COMPAQ_IntPL100TX:
1961 		case PCI_PRODUCT_COMPAQ_DP4000:
1962 		case PCI_PRODUCT_COMPAQ_N10T2:
1963 		case PCI_PRODUCT_COMPAQ_N10_TX_UTP:
1964 		case PCI_PRODUCT_COMPAQ_NF3P:
1965 		case PCI_PRODUCT_COMPAQ_NF3P_BNC:
1966 			return 1;
1967 		}
1968 		return 0;
1969 	}
1970 
1971 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1972 		switch (PCI_PRODUCT(pa->pa_id)) {
1973 		case PCI_PRODUCT_OLICOM_OC2183:
1974 		case PCI_PRODUCT_OLICOM_OC2325:
1975 		case PCI_PRODUCT_OLICOM_OC2326:
1976 			return 1;
1977 		}
1978 		return 0;
1979 	}
1980 
1981 	return 0;
1982 }
1983 
1984 void
1985 tl_attach(parent, self, aux)
1986 	struct device *parent, *self;
1987 	void *aux;
1988 {
1989 	struct tl_softc *sc = (struct tl_softc *)self;
1990 	struct pci_attach_args *pa = aux;
1991 	pci_chipset_tag_t pc = pa->pa_pc;
1992 	pci_intr_handle_t ih;
1993 	const char *intrstr = NULL;
1994 	struct ifnet *ifp = &sc->arpcom.ac_if;
1995 	bus_addr_t iobase;
1996 	bus_size_t iosize;
1997 	u_int32_t command;
1998 	int i, rseg;
1999 	bus_dma_segment_t seg;
2000 	bus_dmamap_t dmamap;
2001 	caddr_t kva;
2002 
2003 	/*
2004 	 * Map control/status registers.
2005 	 */
2006 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2007 
2008 #ifdef TL_USEIOSPACE
2009 	if (!(command & PCI_COMMAND_IO_ENABLE)) {
2010 		printf(": failed to enable I/O ports\n");
2011 		return;
2012 	}
2013 	if (pci_io_find(pc, pa->pa_tag, TL_PCI_LOIO, &iobase, &iosize)) {
2014 		if (pci_io_find(pc, pa->pa_tag, TL_PCI_LOMEM,
2015 		    &iobase, &iosize)) {
2016 			printf(": failed to find i/o space\n");
2017 			return;
2018 		}
2019 	}
2020 	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->tl_bhandle)) {
2021 		printf(": failed to map i/o space\n");
2022 		return;
2023 	}
2024 	sc->tl_btag = pa->pa_iot;
2025 #else
2026 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2027 		printf(": failed to enable memory mapping\n");
2028 		return;
2029 	}
2030 	if (pci_mem_find(pc, pa->pa_tag, TL_PCI_LOMEM, &iobase, &iosize, NULL)){
2031 		if (pci_mem_find(pc, pa->pa_tag, TL_PCI_LOIO,
2032 		    &iobase, &iosize, NULL)) {
2033 			printf(": failed to find memory space\n");
2034 			return;
2035 		}
2036 	}
2037 	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->tl_bhandle)) {
2038 		printf(": failed to map memory space\n");
2039 		return;
2040 	}
2041 	sc->tl_btag = pa->pa_memt;
2042 #endif
2043 
2044 	/*
2045 	 * Manual wants the PCI latency timer jacked up to 0xff
2046 	 */
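	/* The latency timer sits in bits 15:8 of this configuration word. */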
2047 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER);
2048 	command |= 0x0000ff00;
2049 	pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command);
2050 
2051 	/*
2052 	 * Allocate our interrupt.
2053 	 */
2054 	if (pci_intr_map(pa, &ih)) {
2055 		printf(": couldn't map interrupt\n");
2056 		return;
2057 	}
2058 	intrstr = pci_intr_string(pc, ih);
2059 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc,
2060 	    self->dv_xname);
2061 	if (sc->sc_ih == NULL) {
2062 		printf(": could not establish interrupt");
2063 		if (intrstr != NULL)
2064 			printf(" at %s", intrstr);
2065 		printf("\n");
2066 		return;
2067 	}
2068 	printf(": %s", intrstr);
2069 
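	/*
	 * Allocate DMA-able memory for the RX/TX list structures: allocate
	 * a single segment, map it into kernel virtual address space, then
	 * create and load a DMA map covering it.
	 */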
2070 	sc->sc_dmat = pa->pa_dmat;
2071 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data),
2072 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2073 		printf("%s: can't alloc list\n", sc->sc_dev.dv_xname);
2074 		return;
2075 	}
2076 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data),
2077 	    &kva, BUS_DMA_NOWAIT)) {
2078 		printf("%s: can't map dma buffers (%lu bytes)\n",
2079 		    sc->sc_dev.dv_xname, (u_long)sizeof(struct tl_list_data));
2080 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2081 		return;
2082 	}
2083 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1,
2084 	    sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
2085 		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
2086 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
2087 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2088 		return;
2089 	}
2090 	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva,
2091 	    sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) {
2092 		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
2093 		bus_dmamap_destroy(sc->sc_dmat, dmamap);
2094 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
2095 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2096 		return;
2097 	}
2098 	sc->tl_ldata = (struct tl_list_data *)kva;
2099 	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
2100 
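	/* Find this board's entry in the supported-products table. */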
2101 	for (sc->tl_product = tl_prods; sc->tl_product->tp_vend;
2102 	     sc->tl_product++) {
2103 		if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) &&
2104 		    sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id))
2105 			break;
2106 	}
2107 
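	/*
	 * The station address lives at a different EEPROM offset on
	 * Olicom boards than on TI and Compaq ones.
	 */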
2108 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ ||
2109 	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI)
2110 		sc->tl_eeaddr = TL_EEPROM_EADDR;
2111 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM)
2112 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
2113 
2114 	/*
2115 	 * Reset adapter.
2116 	 */
2117 	tl_softreset(sc, 1);
2118 	tl_hardreset(self);
2119 	DELAY(1000000);
2120 	tl_softreset(sc, 1);
2121 
2122 	/*
2123 	 * Get station address from the EEPROM.
2124 	 */
2125 	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2126 	    sc->tl_eeaddr, ETHER_ADDR_LEN)) {
2127 		printf("\n%s: failed to read station address\n",
2128 		    sc->sc_dev.dv_xname);
2129 		return;
2130 	}
2131 
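	/*
	 * Olicom boards apparently store the station address byte-swapped
	 * within each 16-bit word, so fix it up here.
	 */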
2132 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
2133 		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
2134 			u_int16_t *p;
2135 
2136 			p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
2137 			*p = ntohs(*p);
2138 		}
2139 	}
2140 
2141 	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
2142 
2143 	ifp = &sc->arpcom.ac_if;
2144 	ifp->if_softc = sc;
2145 	ifp->if_mtu = ETHERMTU;
2146 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2147 	ifp->if_ioctl = tl_ioctl;
2148 	ifp->if_output = ether_output;
2149 	ifp->if_start = tl_start;
2150 	ifp->if_watchdog = tl_watchdog;
2151 	ifp->if_baudrate = 10000000;
2152 	IFQ_SET_MAXLEN(&ifp->if_snd, TL_TX_LIST_CNT - 1);
2153 	IFQ_SET_READY(&ifp->if_snd);
2154 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2155 
2156 	/*
2157 	 * Reset adapter (again).
2158 	 */
2159 	tl_softreset(sc, 1);
2160 	tl_hardreset(self);
2161 	DELAY(1000000);
2162 	tl_softreset(sc, 1);
2163 
2164 	/*
2165 	 * Do MII setup. If no PHYs are found, then this is a
2166 	 * bitrate ThunderLAN chip that only supports 10baseT
2167 	 * and AUI/BNC.
2168 	 */
2169 	sc->sc_mii.mii_ifp = ifp;
2170 	sc->sc_mii.mii_readreg = tl_miibus_readreg;
2171 	sc->sc_mii.mii_writereg = tl_miibus_writereg;
2172 	sc->sc_mii.mii_statchg = tl_miibus_statchg;
2173 	ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2174 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
2175 	    0);
2176 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2177 		struct ifmedia *ifm;
2178 		sc->tl_bitrate = 1;
2179 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2180 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2181 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2182 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2183 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
2184 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
2185 		/* Reset again, this time setting bitrate mode. */
2186 		tl_softreset(sc, 1);
2187 		ifm = &sc->ifmedia;
2188 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
2189 		tl_ifmedia_upd(ifp);
2190 	} else
2191 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2192 
2193 	/*
2194 	 * Attach us everywhere.
2195 	 */
2196 	if_attach(ifp);
2197 	ether_ifattach(ifp);
2198 
2199 	shutdownhook_establish(tl_shutdown, sc);
2200 }
2201 
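/*
 * Deferred completion of tl_init(): this runs from the two-second
 * timeout armed there and marks the interface as up and running.
 */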
2202 void
2203 tl_wait_up(xsc)
2204 	void *xsc;
2205 {
2206 	struct tl_softc *sc = xsc;
2207 	struct ifnet *ifp = &sc->arpcom.ac_if;
2208 
2209 	ifp->if_flags |= IFF_RUNNING;
2210 	ifp->if_flags &= ~IFF_OACTIVE;
2211 }
2212 
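/*
 * Shutdown hook: stop the chip's DMA channels before the machine
 * reboots or powers down.
 */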
2213 void
2214 tl_shutdown(xsc)
2215 	void *xsc;
2216 {
2217 	struct tl_softc *sc = xsc;
2218 
2219 	tl_stop(sc);
2220 }
2221 
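/*
 * Autoconfiguration glue: probe/attach entry points and the driver
 * definition used by the kernel config machinery.
 */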
2222 struct cfattach tl_ca = {
2223 	sizeof(struct tl_softc), tl_probe, tl_attach
2224 };
2225 
2226 struct cfdriver tl_cd = {
2227 	0, "tl", DV_IFNET
2228 };
2229