1 /* $OpenBSD: if_tl.c,v 1.51 2011/06/22 16:44:27 tedu Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $ 35 */ 36 37 /* 38 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. 39 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, 40 * the National Semiconductor DP83840A physical interface and the 41 * Microchip Technology 24Cxx series serial EEPROM. 42 * 43 * Written using the following four documents: 44 * 45 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) 46 * National Semiconductor DP83840A data sheet (www.national.com) 47 * Microchip Technology 24C02C data sheet (www.microchip.com) 48 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) 49 * 50 * Written by Bill Paul <wpaul@ctr.columbia.edu> 51 * Electrical Engineering Department 52 * Columbia University, New York City 53 */ 54 55 /* 56 * Some notes about the ThunderLAN: 57 * 58 * The ThunderLAN controller is a single chip containing PCI controller 59 * logic, approximately 3K of on-board SRAM, a LAN controller, and media 60 * independent interface (MII) bus. The MII allows the ThunderLAN chip to 61 * control up to 32 different physical interfaces (PHYs). The ThunderLAN 62 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller 63 * to act as a complete ethernet interface. 64 * 65 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards 66 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec 67 * in full or half duplex. Some of the Compaq Deskpro machines use a 68 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters 69 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in 70 * concert with the ThunderLAN's internal PHY to provide full 10/100 71 * support. This is cheaper than using a standalone external PHY for both 72 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste. 
 * A serial EEPROM is also attached to the ThunderLAN chip to provide
 * power-up default register settings and for storing the adapter's
 * station address. Although not supported by this driver, the ThunderLAN
 * chip can also be connected to token ring PHYs.
 *
 * The ThunderLAN has a set of registers which can be used to issue
 * commands, acknowledge interrupts, and to manipulate other internal
 * registers on its DIO bus. The primary registers can be accessed
 * using either programmed I/O (inb/outb) or via PCI memory mapping,
 * depending on how the card is configured during the PCI probing
 * phase. It is even possible to have both PIO and memory mapped
 * access turned on at the same time.
 *
 * Frame reception and transmission with the ThunderLAN chip is done
 * using frame 'lists.' A list structure looks more or less like this:
 *
 * struct tl_frag {
 *	u_int32_t fragment_address;
 *	u_int32_t fragment_size;
 * };
 * struct tl_list {
 *	u_int32_t forward_pointer;
 *	u_int16_t cstat;
 *	u_int16_t frame_size;
 *	struct tl_frag fragments[10];
 * };
 *
 * The forward pointer in the list header can be either a 0 or the address
 * of another list, which allows several lists to be linked together. Each
 * list contains up to 10 fragment descriptors. This means the chip allows
 * ethernet frames to be broken up into up to 10 chunks for transfer to
 * and from the SRAM. Note that the forward pointer and fragment buffer
 * addresses are physical memory addresses, not virtual. Note also that
 * a single ethernet frame can not span lists: if the host wants to
 * transmit a frame and the frame data is split up over more than 10
 * buffers, the frame has to be collapsed before it can be transmitted.
 *
 * To receive frames, the driver sets up a number of lists and populates
 * the fragment descriptors, then it sends an RX GO command to the chip.
 * When a frame is received, the chip will DMA it into the memory regions
 * specified by the fragment descriptors and then trigger an RX 'end of
 * frame interrupt' when done. The driver may choose to use only one
 * fragment per list; this may result in slightly less efficient use
 * of memory in exchange for improving performance.
 *
 * To transmit frames, the driver again sets up lists and fragment
 * descriptors, only this time the buffers contain frame data that
 * is to be DMA'ed into the chip instead of out of it. Once the chip
 * has transferred the data into its on-board SRAM, it will trigger a
 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
 * interrupt when it reaches the end of the list.
 */

/*
 * Some notes about this driver:
 *
 * The ThunderLAN chip provides a couple of different ways to organize
 * reception, transmission and interrupt handling. The simplest approach
 * is to use one list each for transmission and reception. In this mode,
 * the ThunderLAN will generate two interrupts for every received frame
 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
 * performance to have to handle so many interrupts.
 *
 * Initially I wanted to create a circular list of receive buffers so
 * that the ThunderLAN chip would think there was an infinitely long
 * receive channel and never deliver an RXEOC interrupt. However this
 * doesn't work correctly under heavy load: while the manual says the
 * chip will trigger an RXEOF interrupt each time a frame is copied into
 * memory, you can't count on the chip waiting around for you to acknowledge
 * the interrupt before it starts trying to DMA the next frame.
The result
 * is that the chip might traverse the entire circular list and then wrap
 * around before you have a chance to do anything about it. Consequently,
 * the receive list is terminated (with a 0 in the forward pointer in the
 * last element). Each time an RXEOF interrupt arrives, the used list
 * is shifted to the end of the list. This gives the appearance of an
 * infinitely large RX chain so long as the driver doesn't fall behind
 * the chip and allow all of the lists to be filled up.
 *
 * If all the lists are filled, the adapter will deliver an RX 'end of
 * channel' interrupt when it hits the 0 forward pointer at the end of
 * the chain. The RXEOC handler then cleans out the RX chain and resets
 * the list head pointer in the ch_parm register and restarts the receiver.
 *
 * For frame transmission, it is possible to program the ThunderLAN's
 * transmit interrupt threshold so that the chip can acknowledge multiple
 * lists with only a single TX EOF interrupt. This allows the driver to
 * queue several frames in one shot, and only have to handle a total of
 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
 * are transmitted. Frame transmission is done directly out of the
 * mbufs passed to the tl_start() routine via the interface send queue.
 * The driver simply sets up the fragment descriptors in the transmit
 * lists to point to the mbuf data regions and sends a TX GO command.
 *
 * Note that since the RX and TX lists themselves are always used
 * only by the driver, they are malloc()ed once at driver initialization
 * time and never free()ed.
 *
 * Also, in order to remain as platform independent as possible, this
 * driver uses memory mapped register access to manipulate the card
 * as opposed to programmed I/O. This avoids the use of the inb/outb
 * (and related) instructions which are specific to the i386 platform.
175 * 176 * Using these techniques, this driver achieves very high performance 177 * by minimizing the amount of interrupts generated during large 178 * transfers and by completely avoiding buffer copies. Frame transfer 179 * to and from the ThunderLAN chip is performed entirely by the chip 180 * itself thereby reducing the load on the host CPU. 181 */ 182 183 #include "bpfilter.h" 184 185 #include <sys/param.h> 186 #include <sys/systm.h> 187 #include <sys/sockio.h> 188 #include <sys/mbuf.h> 189 #include <sys/malloc.h> 190 #include <sys/kernel.h> 191 #include <sys/socket.h> 192 #include <sys/device.h> 193 #include <sys/timeout.h> 194 195 #include <net/if.h> 196 197 #ifdef INET 198 #include <netinet/in.h> 199 #include <netinet/in_systm.h> 200 #include <netinet/in_var.h> 201 #include <netinet/ip.h> 202 #include <netinet/if_ether.h> 203 #endif 204 205 #include <net/if_dl.h> 206 #include <net/if_media.h> 207 208 #if NBPFILTER > 0 209 #include <net/bpf.h> 210 #endif 211 212 #include <uvm/uvm_extern.h> /* for vtophys */ 213 #define VTOPHYS(v) vtophys((vaddr_t)(v)) 214 215 #include <dev/mii/mii.h> 216 #include <dev/mii/miivar.h> 217 218 #include <dev/pci/pcireg.h> 219 #include <dev/pci/pcivar.h> 220 #include <dev/pci/pcidevs.h> 221 222 /* 223 * Default to using PIO register access mode to pacify certain 224 * laptop docking stations with built-in ThunderLAN chips that 225 * don't seem to handle memory mapped mode properly. 
226 */ 227 #define TL_USEIOSPACE 228 229 #include <dev/pci/if_tlreg.h> 230 #include <dev/mii/tlphyvar.h> 231 232 const struct tl_products tl_prods[] = { 233 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T }, 234 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 }, 235 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2 }, 236 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T }, 237 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T }, 238 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T }, 239 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 }, 240 { PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 }, 241 { PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 }, 242 { 0, 0, 0 } 243 }; 244 245 int tl_probe(struct device *, void *, void *); 246 void tl_attach(struct device *, struct device *, void *); 247 void tl_wait_up(void *); 248 int tl_intvec_rxeoc(void *, u_int32_t); 249 int tl_intvec_txeoc(void *, u_int32_t); 250 int tl_intvec_txeof(void *, u_int32_t); 251 int tl_intvec_rxeof(void *, u_int32_t); 252 int tl_intvec_adchk(void *, u_int32_t); 253 int tl_intvec_netsts(void *, u_int32_t); 254 255 int tl_newbuf(struct tl_softc *, 256 struct tl_chain_onefrag *); 257 void tl_stats_update(void *); 258 int tl_encap(struct tl_softc *, struct tl_chain *, 259 struct mbuf *); 260 261 int tl_intr(void *); 262 void tl_start(struct ifnet *); 263 int tl_ioctl(struct ifnet *, u_long, caddr_t); 264 void tl_init(void *); 265 void tl_stop(struct tl_softc *); 266 void tl_watchdog(struct ifnet *); 267 int tl_ifmedia_upd(struct ifnet *); 268 void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 269 270 u_int8_t tl_eeprom_putbyte(struct tl_softc *, int); 271 u_int8_t tl_eeprom_getbyte(struct tl_softc *, 272 int, u_int8_t *); 273 int tl_read_eeprom(struct tl_softc *, caddr_t, int, int); 274 275 void 
tl_mii_sync(struct tl_softc *); 276 void tl_mii_send(struct tl_softc *, u_int32_t, int); 277 int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *); 278 int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *); 279 int tl_miibus_readreg(struct device *, int, int); 280 void tl_miibus_writereg(struct device *, int, int, int); 281 void tl_miibus_statchg(struct device *); 282 283 void tl_setmode(struct tl_softc *, int); 284 #if 0 285 int tl_calchash(caddr_t); 286 #endif 287 void tl_setmulti(struct tl_softc *); 288 void tl_setfilt(struct tl_softc *, caddr_t, int); 289 void tl_softreset(struct tl_softc *, int); 290 void tl_hardreset(struct device *); 291 int tl_list_rx_init(struct tl_softc *); 292 int tl_list_tx_init(struct tl_softc *); 293 294 u_int8_t tl_dio_read8(struct tl_softc *, int); 295 u_int16_t tl_dio_read16(struct tl_softc *, int); 296 u_int32_t tl_dio_read32(struct tl_softc *, int); 297 void tl_dio_write8(struct tl_softc *, int, int); 298 void tl_dio_write16(struct tl_softc *, int, int); 299 void tl_dio_write32(struct tl_softc *, int, int); 300 void tl_dio_setbit(struct tl_softc *, int, int); 301 void tl_dio_clrbit(struct tl_softc *, int, int); 302 void tl_dio_setbit16(struct tl_softc *, int, int); 303 void tl_dio_clrbit16(struct tl_softc *, int, int); 304 305 u_int8_t tl_dio_read8(sc, reg) 306 struct tl_softc *sc; 307 int reg; 308 { 309 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 310 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); 311 } 312 313 u_int16_t tl_dio_read16(sc, reg) 314 struct tl_softc *sc; 315 int reg; 316 { 317 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 318 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); 319 } 320 321 u_int32_t tl_dio_read32(sc, reg) 322 struct tl_softc *sc; 323 int reg; 324 { 325 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 326 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); 327 } 328 329 void tl_dio_write8(sc, reg, val) 330 struct tl_softc *sc; 331 int reg; 332 int val; 333 { 334 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 335 CSR_WRITE_1(sc, 
TL_DIO_DATA + (reg & 3), val); 336 return; 337 } 338 339 void tl_dio_write16(sc, reg, val) 340 struct tl_softc *sc; 341 int reg; 342 int val; 343 { 344 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 345 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); 346 return; 347 } 348 349 void tl_dio_write32(sc, reg, val) 350 struct tl_softc *sc; 351 int reg; 352 int val; 353 { 354 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 355 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); 356 return; 357 } 358 359 void tl_dio_setbit(sc, reg, bit) 360 struct tl_softc *sc; 361 int reg; 362 int bit; 363 { 364 u_int8_t f; 365 366 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 367 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 368 f |= bit; 369 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 370 371 return; 372 } 373 374 void tl_dio_clrbit(sc, reg, bit) 375 struct tl_softc *sc; 376 int reg; 377 int bit; 378 { 379 u_int8_t f; 380 381 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 382 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 383 f &= ~bit; 384 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 385 386 return; 387 } 388 389 void tl_dio_setbit16(sc, reg, bit) 390 struct tl_softc *sc; 391 int reg; 392 int bit; 393 { 394 u_int16_t f; 395 396 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 397 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 398 f |= bit; 399 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 400 401 return; 402 } 403 404 void tl_dio_clrbit16(sc, reg, bit) 405 struct tl_softc *sc; 406 int reg; 407 int bit; 408 { 409 u_int16_t f; 410 411 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 412 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 413 f &= ~bit; 414 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 415 416 return; 417 } 418 419 /* 420 * Send an instruction or address to the EEPROM, check for ACK. 421 */ 422 u_int8_t tl_eeprom_putbyte(sc, byte) 423 struct tl_softc *sc; 424 int byte; 425 { 426 int i, ack = 0; 427 428 /* 429 * Make sure we're in TX mode. 430 */ 431 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); 432 433 /* 434 * Feed in each bit and strobe the clock. 
435 */ 436 for (i = 0x80; i; i >>= 1) { 437 if (byte & i) { 438 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); 439 } else { 440 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); 441 } 442 DELAY(1); 443 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 444 DELAY(1); 445 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 446 } 447 448 /* 449 * Turn off TX mode. 450 */ 451 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 452 453 /* 454 * Check for ack. 455 */ 456 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 457 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; 458 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 459 460 return(ack); 461 } 462 463 /* 464 * Read a byte of data stored in the EEPROM at address 'addr.' 465 */ 466 u_int8_t tl_eeprom_getbyte(sc, addr, dest) 467 struct tl_softc *sc; 468 int addr; 469 u_int8_t *dest; 470 { 471 int i; 472 u_int8_t byte = 0; 473 474 tl_dio_write8(sc, TL_NETSIO, 0); 475 476 EEPROM_START; 477 478 /* 479 * Send write control code to EEPROM. 480 */ 481 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 482 printf("%s: failed to send write command, status: %x\n", 483 sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO)); 484 return(1); 485 } 486 487 /* 488 * Send address of byte we want to read. 489 */ 490 if (tl_eeprom_putbyte(sc, addr)) { 491 printf("%s: failed to send address, status: %x\n", 492 sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO)); 493 return(1); 494 } 495 496 EEPROM_STOP; 497 EEPROM_START; 498 /* 499 * Send read control code to EEPROM. 500 */ 501 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 502 printf("%s: failed to send write command, status: %x\n", 503 sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO)); 504 return(1); 505 } 506 507 /* 508 * Start reading bits from EEPROM. 
509 */ 510 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 511 for (i = 0x80; i; i >>= 1) { 512 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 513 DELAY(1); 514 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) 515 byte |= i; 516 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 517 DELAY(1); 518 } 519 520 EEPROM_STOP; 521 522 /* 523 * No ACK generated for read, so just return byte. 524 */ 525 526 *dest = byte; 527 528 return(0); 529 } 530 531 /* 532 * Read a sequence of bytes from the EEPROM. 533 */ 534 int tl_read_eeprom(sc, dest, off, cnt) 535 struct tl_softc *sc; 536 caddr_t dest; 537 int off; 538 int cnt; 539 { 540 int err = 0, i; 541 u_int8_t byte = 0; 542 543 for (i = 0; i < cnt; i++) { 544 err = tl_eeprom_getbyte(sc, off + i, &byte); 545 if (err) 546 break; 547 *(dest + i) = byte; 548 } 549 550 return(err ? 1 : 0); 551 } 552 553 void tl_mii_sync(sc) 554 struct tl_softc *sc; 555 { 556 int i; 557 558 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 559 560 for (i = 0; i < 32; i++) { 561 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 562 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 563 } 564 565 return; 566 } 567 568 void tl_mii_send(sc, bits, cnt) 569 struct tl_softc *sc; 570 u_int32_t bits; 571 int cnt; 572 { 573 int i; 574 575 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 576 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 577 if (bits & i) { 578 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); 579 } else { 580 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); 581 } 582 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 583 } 584 } 585 586 int tl_mii_readreg(sc, frame) 587 struct tl_softc *sc; 588 struct tl_mii_frame *frame; 589 590 { 591 int i, ack, s; 592 int minten = 0; 593 594 s = splnet(); 595 596 tl_mii_sync(sc); 597 598 /* 599 * Set up frame for RX. 600 */ 601 frame->mii_stdelim = TL_MII_STARTDELIM; 602 frame->mii_opcode = TL_MII_READOP; 603 frame->mii_turnaround = 0; 604 frame->mii_data = 0; 605 606 /* 607 * Turn off MII interrupt by forcing MINTEN low. 
608 */ 609 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 610 if (minten) { 611 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 612 } 613 614 /* 615 * Turn on data xmit. 616 */ 617 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 618 619 /* 620 * Send command/address info. 621 */ 622 tl_mii_send(sc, frame->mii_stdelim, 2); 623 tl_mii_send(sc, frame->mii_opcode, 2); 624 tl_mii_send(sc, frame->mii_phyaddr, 5); 625 tl_mii_send(sc, frame->mii_regaddr, 5); 626 627 /* 628 * Turn off xmit. 629 */ 630 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 631 632 /* Idle bit */ 633 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 634 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 635 636 /* Check for ack */ 637 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 638 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; 639 640 /* Complete the cycle */ 641 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 642 643 /* 644 * Now try reading data bits. If the ack failed, we still 645 * need to clock through 16 cycles to keep the PHYs in sync. 646 */ 647 if (ack) { 648 for(i = 0; i < 16; i++) { 649 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 650 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 651 } 652 goto fail; 653 } 654 655 for (i = 0x8000; i; i >>= 1) { 656 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 657 if (!ack) { 658 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) 659 frame->mii_data |= i; 660 } 661 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 662 } 663 664 fail: 665 666 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 667 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 668 669 /* Reenable interrupts */ 670 if (minten) { 671 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 672 } 673 674 splx(s); 675 676 if (ack) 677 return(1); 678 return(0); 679 } 680 681 int tl_mii_writereg(sc, frame) 682 struct tl_softc *sc; 683 struct tl_mii_frame *frame; 684 685 { 686 int s; 687 int minten; 688 689 tl_mii_sync(sc); 690 691 s = splnet(); 692 /* 693 * Set up frame for TX. 
694 */ 695 696 frame->mii_stdelim = TL_MII_STARTDELIM; 697 frame->mii_opcode = TL_MII_WRITEOP; 698 frame->mii_turnaround = TL_MII_TURNAROUND; 699 700 /* 701 * Turn off MII interrupt by forcing MINTEN low. 702 */ 703 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 704 if (minten) { 705 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 706 } 707 708 /* 709 * Turn on data output. 710 */ 711 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 712 713 tl_mii_send(sc, frame->mii_stdelim, 2); 714 tl_mii_send(sc, frame->mii_opcode, 2); 715 tl_mii_send(sc, frame->mii_phyaddr, 5); 716 tl_mii_send(sc, frame->mii_regaddr, 5); 717 tl_mii_send(sc, frame->mii_turnaround, 2); 718 tl_mii_send(sc, frame->mii_data, 16); 719 720 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 721 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 722 723 /* 724 * Turn off xmit. 725 */ 726 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 727 728 /* Reenable interrupts */ 729 if (minten) 730 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 731 732 splx(s); 733 734 return(0); 735 } 736 737 int tl_miibus_readreg(dev, phy, reg) 738 struct device *dev; 739 int phy, reg; 740 { 741 struct tl_softc *sc = (struct tl_softc *)dev; 742 struct tl_mii_frame frame; 743 744 bzero(&frame, sizeof(frame)); 745 746 frame.mii_phyaddr = phy; 747 frame.mii_regaddr = reg; 748 tl_mii_readreg(sc, &frame); 749 750 return(frame.mii_data); 751 } 752 753 void tl_miibus_writereg(dev, phy, reg, data) 754 struct device *dev; 755 int phy, reg, data; 756 { 757 struct tl_softc *sc = (struct tl_softc *)dev; 758 struct tl_mii_frame frame; 759 760 bzero(&frame, sizeof(frame)); 761 762 frame.mii_phyaddr = phy; 763 frame.mii_regaddr = reg; 764 frame.mii_data = data; 765 766 tl_mii_writereg(sc, &frame); 767 } 768 769 void tl_miibus_statchg(dev) 770 struct device *dev; 771 { 772 struct tl_softc *sc = (struct tl_softc *)dev; 773 774 if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) { 775 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 776 } else { 777 tl_dio_clrbit(sc, 
TL_NETCMD, TL_CMD_DUPLEX); 778 } 779 } 780 781 /* 782 * Set modes for bitrate devices. 783 */ 784 void tl_setmode(sc, media) 785 struct tl_softc *sc; 786 int media; 787 { 788 if (IFM_SUBTYPE(media) == IFM_10_5) 789 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 790 if (IFM_SUBTYPE(media) == IFM_10_T) { 791 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 792 if ((media & IFM_GMASK) == IFM_FDX) { 793 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 794 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 795 } else { 796 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 797 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 798 } 799 } 800 } 801 802 #if 0 803 /* 804 * Calculate the hash of a MAC address for programming the multicast hash 805 * table. This hash is simply the address split into 6-bit chunks 806 * XOR'd, e.g. 807 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 808 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 809 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then 810 * the folded 24-bit value is split into 6-bit portions and XOR'd. 811 */ 812 int tl_calchash(addr) 813 caddr_t addr; 814 { 815 int t; 816 817 t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | 818 (addr[2] ^ addr[5]); 819 return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; 820 } 821 #endif 822 823 /* 824 * The ThunderLAN has a perfect MAC address filter in addition to 825 * the multicast hash filter. The perfect filter can be programmed 826 * with up to four MAC addresses. The first one is always used to 827 * hold the station address, which leaves us free to use the other 828 * three for multicast addresses. 
829 */ 830 void tl_setfilt(sc, addr, slot) 831 struct tl_softc *sc; 832 caddr_t addr; 833 int slot; 834 { 835 int i; 836 u_int16_t regaddr; 837 838 regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); 839 840 for (i = 0; i < ETHER_ADDR_LEN; i++) 841 tl_dio_write8(sc, regaddr + i, *(addr + i)); 842 843 return; 844 } 845 846 /* 847 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly 848 * linked list. This is fine, except addresses are added from the head 849 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") 850 * group to always be in the perfect filter, but as more groups are added, 851 * the 224.0.0.1 entry (which is always added first) gets pushed down 852 * the list and ends up at the tail. So after 3 or 4 multicast groups 853 * are added, the all-hosts entry gets pushed out of the perfect filter 854 * and into the hash table. 855 * 856 * Because the multicast list is a doubly-linked list as opposed to a 857 * circular queue, we don't have the ability to just grab the tail of 858 * the list and traverse it backwards. Instead, we have to traverse 859 * the list once to find the tail, then traverse it again backwards to 860 * update the multicast filter. 
861 */ 862 void tl_setmulti(sc) 863 struct tl_softc *sc; 864 { 865 struct ifnet *ifp; 866 u_int32_t hashes[2] = { 0, 0 }; 867 int h; 868 struct arpcom *ac = &sc->arpcom; 869 struct ether_multistep step; 870 struct ether_multi *enm; 871 ifp = &sc->arpcom.ac_if; 872 873 tl_dio_write32(sc, TL_HASH1, 0); 874 tl_dio_write32(sc, TL_HASH2, 0); 875 876 ifp->if_flags &= ~IFF_ALLMULTI; 877 #if 0 878 ETHER_FIRST_MULTI(step, ac, enm); 879 while (enm != NULL) { 880 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) { 881 h = tl_calchash(enm->enm_addrlo); 882 hashes[h/32] |= (1 << (h % 32)); 883 } else { 884 hashes[0] = hashes[1] = 0xffffffff; 885 ifp->if_flags |= IFF_ALLMULTI; 886 break; 887 } 888 ETHER_NEXT_MULTI(step, enm); 889 } 890 #else 891 ETHER_FIRST_MULTI(step, ac, enm); 892 h = 0; 893 while (enm != NULL) { 894 h++; 895 ETHER_NEXT_MULTI(step, enm); 896 } 897 if (h) { 898 hashes[0] = hashes[1] = 0xffffffff; 899 ifp->if_flags |= IFF_ALLMULTI; 900 } else { 901 hashes[0] = hashes[1] = 0x00000000; 902 } 903 #endif 904 905 tl_dio_write32(sc, TL_HASH1, hashes[0]); 906 tl_dio_write32(sc, TL_HASH2, hashes[1]); 907 908 return; 909 } 910 911 /* 912 * This routine is recommended by the ThunderLAN manual to insure that 913 * the internal PHY is powered up correctly. It also recommends a one 914 * second pause at the end to 'wait for the clocks to start' but in my 915 * experience this isn't necessary. 
916 */ 917 void tl_hardreset(dev) 918 struct device *dev; 919 { 920 struct tl_softc *sc = (struct tl_softc *)dev; 921 int i; 922 u_int16_t flags; 923 924 flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; 925 926 for (i =0 ; i < MII_NPHY; i++) 927 tl_miibus_writereg(dev, i, MII_BMCR, flags); 928 929 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); 930 tl_mii_sync(sc); 931 while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); 932 933 DELAY(5000); 934 return; 935 } 936 937 void tl_softreset(sc, internal) 938 struct tl_softc *sc; 939 int internal; 940 { 941 u_int32_t cmd, dummy, i; 942 943 /* Assert the adapter reset bit. */ 944 CMD_SET(sc, TL_CMD_ADRST); 945 /* Turn off interrupts */ 946 CMD_SET(sc, TL_CMD_INTSOFF); 947 948 /* First, clear the stats registers. */ 949 for (i = 0; i < 5; i++) 950 dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); 951 952 /* Clear Areg and Hash registers */ 953 for (i = 0; i < 8; i++) 954 tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); 955 956 /* 957 * Set up Netconfig register. Enable one channel and 958 * one fragment mode. 959 */ 960 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); 961 if (internal && !sc->tl_bitrate) { 962 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 963 } else { 964 tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 965 } 966 967 /* Handle cards with bitrate devices. */ 968 if (sc->tl_bitrate) 969 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); 970 971 /* 972 * Load adapter irq pacing timer and tx threshold. 973 * We make the transmit threshold 1 initially but we may 974 * change that later. 
975 */ 976 cmd = CSR_READ_4(sc, TL_HOSTCMD); 977 cmd |= TL_CMD_NES; 978 cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); 979 CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); 980 CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); 981 982 /* Unreset the MII */ 983 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); 984 985 /* Take the adapter out of reset */ 986 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); 987 988 /* Wait for things to settle down a little. */ 989 DELAY(500); 990 991 return; 992 } 993 994 /* 995 * Initialize the transmit lists. 996 */ 997 int tl_list_tx_init(sc) 998 struct tl_softc *sc; 999 { 1000 struct tl_chain_data *cd; 1001 struct tl_list_data *ld; 1002 int i; 1003 1004 cd = &sc->tl_cdata; 1005 ld = sc->tl_ldata; 1006 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1007 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; 1008 if (i == (TL_TX_LIST_CNT - 1)) 1009 cd->tl_tx_chain[i].tl_next = NULL; 1010 else 1011 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; 1012 } 1013 1014 cd->tl_tx_free = &cd->tl_tx_chain[0]; 1015 cd->tl_tx_tail = cd->tl_tx_head = NULL; 1016 sc->tl_txeoc = 1; 1017 1018 return(0); 1019 } 1020 1021 /* 1022 * Initialize the RX lists and allocate mbufs for them. 
1023 */ 1024 int tl_list_rx_init(sc) 1025 struct tl_softc *sc; 1026 { 1027 struct tl_chain_data *cd; 1028 struct tl_list_data *ld; 1029 int i; 1030 1031 cd = &sc->tl_cdata; 1032 ld = sc->tl_ldata; 1033 1034 for (i = 0; i < TL_RX_LIST_CNT; i++) { 1035 cd->tl_rx_chain[i].tl_ptr = 1036 (struct tl_list_onefrag *)&ld->tl_rx_list[i]; 1037 if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS) 1038 return(ENOBUFS); 1039 if (i == (TL_RX_LIST_CNT - 1)) { 1040 cd->tl_rx_chain[i].tl_next = NULL; 1041 ld->tl_rx_list[i].tlist_fptr = 0; 1042 } else { 1043 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; 1044 ld->tl_rx_list[i].tlist_fptr = 1045 VTOPHYS(&ld->tl_rx_list[i + 1]); 1046 } 1047 } 1048 1049 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1050 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1051 1052 return(0); 1053 } 1054 1055 int tl_newbuf(sc, c) 1056 struct tl_softc *sc; 1057 struct tl_chain_onefrag *c; 1058 { 1059 struct mbuf *m_new = NULL; 1060 1061 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1062 if (m_new == NULL) { 1063 return(ENOBUFS); 1064 } 1065 1066 MCLGET(m_new, M_DONTWAIT); 1067 if (!(m_new->m_flags & M_EXT)) { 1068 m_freem(m_new); 1069 return(ENOBUFS); 1070 } 1071 1072 #ifdef __alpha__ 1073 m_new->m_data += 2; 1074 #endif 1075 1076 c->tl_mbuf = m_new; 1077 c->tl_next = NULL; 1078 c->tl_ptr->tlist_frsize = MCLBYTES; 1079 c->tl_ptr->tlist_fptr = 0; 1080 c->tl_ptr->tl_frag.tlist_dadr = VTOPHYS(mtod(m_new, caddr_t)); 1081 c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; 1082 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1083 1084 return(0); 1085 } 1086 /* 1087 * Interrupt handler for RX 'end of frame' condition (EOF). This 1088 * tells us that a full ethernet frame has been captured and we need 1089 * to handle it. 1090 * 1091 * Reception is done using 'lists' which consist of a header and a 1092 * series of 10 data count/data address pairs that point to buffers. 
 * Initially you're supposed to create a list, populate it with pointers
 * to buffers, then load the physical address of the list into the
 * ch_parm register. The adapter is then supposed to DMA the received
 * frame into the buffers for you.
 *
 * To make things as fast as possible, we have the chip DMA directly
 * into mbufs. This saves us from having to do a buffer copy: we can
 * just hand the mbufs directly to ether_input(). Once the frame has
 * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long we we stay ahead of
 * the chip, it will always think it has an endless receive channel.
 *
 * If we happen to fall behind and the chip manages to fill up all of
 * the buffers, it will generate an end of channel interrupt and wait
 * for us to empty the chain and restart the receiver.
 *
 * Returns the number of frames processed; the caller folds this into
 * the interrupt ACK count written back to the chip.
 */
int tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first entry the chip hasn't completed. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/* Out of mbufs: recycle the old buffer in place
			   and drop the frame. */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Re-arm the entry at the tail of the RX chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    VTOPHYS(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode. If we don't
		 * want the packet, just forget it. We leave the mbuf in place
		 * since it can be used again later.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
		}
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain). In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
int tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind to the start of the (now re-armed) RX chain and
	   point the chip at it before the caller restarts it. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}

/*
 * TX 'end of frame' handler: reclaim chain entries for frames the chip
 * has finished transmitting and put them back on the free list.
 * Returns the number of frames reclaimed (used as the ACK count).
 */
int tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this batch. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}

/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
 * You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
 * it could attempt to issue a transmit command prematurely.
 *
 * To guard against this, tl_start() will only issue transmit commands
 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
 * can set this flag once tl_start() has cleared it.
 */
int tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing left queued: allow tl_start() to GO again. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    VTOPHYS(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		/* Already acked above; tell tl_intr() not to ack again. */
		return(0);
	}

	return(1);
}

/*
 * Adapter check: the chip reported an internal error.  Log the failure
 * address latched in CH_PARM, then fully reset and reinitialize.
 */
int tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}

/*
 * Network status interrupt: read the status bits, write them back to
 * clear them, and log the event.
 */
int tl_intvec_netsts(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	u_int16_t		netsts;

	sc = xsc;

	netsts = tl_dio_read16(sc, TL_NETSTS);
	tl_dio_write16(sc, TL_NETSTS, netsts);

	printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);

	return(1);
}

/*
 * Main interrupt handler.  Decode the vector/type fields from the
 * HOST_INT register, dispatch to the matching tl_intvec_* handler,
 * and acknowledge the interrupt with the count the handler returns.
 * Returns nonzero if the interrupt was ours.
 */
int tl_intr(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* Preserve the raw value in the upper half for the ACK writes. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* ADCHK and NETSTS share an interrupt type; the vector
		   distinguishes them. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		tl_start(ifp);

	return r;
}

/*
 * Periodic statistics pull.  Reads the chip's statistics counters into
 * a local struct via the auto-incrementing DIO window, folds them into
 * the interface counters, bumps the TX threshold after underruns, and
 * reschedules itself for one second later.
 */
void tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	u_int32_t		*p;
	int			s;

	s = splnet();

	bzero(&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/* TL_DIO_ADDR_INC advances the DIO address after each read, so
	   five reads of TL_DIO_DATA fetch five consecutive counters. */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t	tx_thresh;
		/* Raise the TX FIFO threshold one step (field lives in
		   the high nibble of TL_ACOMMIT) unless already maxed. */
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	timeout_add_sec(&sc->tl_stats_tmo, 1);

	if (!sc->tl_bitrate)
		mii_tick(&sc->sc_mii);

	splx(s);
	return;
}

/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, 1 if an mbuf could not be allocated for the
 * too-many-fragments fallback copy (the caller's chain is untouched in
 * that case).
 */
int tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				VTOPHYS(mtod(m, vaddr_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = VTOPHYS(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while(sc->tl_cdata.tl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = VTOPHYS(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/* Only kick the chip if the last TXEOC has been seen;
		   see the ordering note above tl_intvec_txeoc(). */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* Transmission in progress: just append to the chain. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 10;

	return;
}

/*
 * Stop, reprogram and restart the interface: set up filters, rebuild
 * the RX/TX lists, point the chip at the RX chain and start the
 * receiver, then kick off the periodic stats/link timeouts.
 */
void tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		tl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		mii_mediachg(&sc->sc_mii);
	} else {
		tl_ifmedia_upd(ifp);
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	splx(s);

	/* Start the stats update counter */
	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
	timeout_add_sec(&sc->tl_stats_tmo, 1);
	/* Mark the interface RUNNING a little later, from tl_wait_up(). */
	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
	timeout_add_sec(&sc->tl_wait_tmo, 2);

	return;
}

/*
 * Set media options.
 */
int
tl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc = ifp->if_softc;

	/* Bitrate (PHY-less) parts are programmed directly; otherwise
	   defer to the MII layer. */
	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else
		mii_mediachg(&sc->sc_mii);

	return(0);
}

/*
 * Report current media status.
 */
void tl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct tl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;

	ifmr->ifm_active = IFM_ETHER;
	if (sc->tl_bitrate) {
		/* Bitrate mode: media is encoded in the ACOMMIT MTXD
		   pins rather than reported by a PHY. */
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	} else {
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}

	return;
}

/*
 * Handle socket ioctls: address assignment, interface flags
 * (promiscuous mode changes are applied without a full reinit),
 * media selection, and the generic ethernet ioctls.
 */
int tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			tl_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			tl_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* If only the promiscuous bit changed while
			   running, toggle CAF instead of reinitializing. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		/* Remember the flags for the next delta computation. */
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		/* Multicast list changed: reload the filter. */
		if (ifp->if_flags & IFF_RUNNING)
			tl_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Watchdog: the TX timeout set by tl_start() expired without a TXEOC.
 * Count the error and reset/reinitialize the chip.
 */
void tl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	ifp->if_oerrors++;

	tl_softreset(sc, 1);
	tl_init(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void tl_stop(sc)
	struct tl_softc		*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	timeout_del(&sc->tl_stats_tmo);
	timeout_del(&sc->tl_wait_tmo);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero(&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
1918 */ 1919 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1920 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { 1921 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); 1922 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; 1923 } 1924 } 1925 bzero(&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list)); 1926 1927 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1928 1929 return; 1930 } 1931 1932 int 1933 tl_probe(parent, match, aux) 1934 struct device *parent; 1935 void *match; 1936 void *aux; 1937 { 1938 struct pci_attach_args *pa = (struct pci_attach_args *) aux; 1939 1940 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) { 1941 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN) 1942 return 1; 1943 return 0; 1944 } 1945 1946 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) { 1947 switch (PCI_PRODUCT(pa->pa_id)) { 1948 case PCI_PRODUCT_COMPAQ_N100TX: 1949 case PCI_PRODUCT_COMPAQ_N10T: 1950 case PCI_PRODUCT_COMPAQ_IntNF3P: 1951 case PCI_PRODUCT_COMPAQ_DPNet100TX: 1952 case PCI_PRODUCT_COMPAQ_IntPL100TX: 1953 case PCI_PRODUCT_COMPAQ_DP4000: 1954 case PCI_PRODUCT_COMPAQ_N10T2: 1955 case PCI_PRODUCT_COMPAQ_N10_TX_UTP: 1956 case PCI_PRODUCT_COMPAQ_NF3P: 1957 case PCI_PRODUCT_COMPAQ_NF3P_BNC: 1958 return 1; 1959 } 1960 return 0; 1961 } 1962 1963 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) { 1964 switch (PCI_PRODUCT(pa->pa_id)) { 1965 case PCI_PRODUCT_OLICOM_OC2183: 1966 case PCI_PRODUCT_OLICOM_OC2325: 1967 case PCI_PRODUCT_OLICOM_OC2326: 1968 return 1; 1969 } 1970 return 0; 1971 } 1972 1973 return 0; 1974 } 1975 1976 void 1977 tl_attach(parent, self, aux) 1978 struct device *parent, *self; 1979 void *aux; 1980 { 1981 struct tl_softc *sc = (struct tl_softc *)self; 1982 struct pci_attach_args *pa = aux; 1983 pci_chipset_tag_t pc = pa->pa_pc; 1984 pci_intr_handle_t ih; 1985 const char *intrstr = NULL; 1986 struct ifnet *ifp = &sc->arpcom.ac_if; 1987 bus_size_t iosize; 1988 u_int32_t command; 1989 int i, rseg; 1990 bus_dma_segment_t seg; 1991 bus_dmamap_t dmamap; 1992 caddr_t kva; 1993 
1994 /* 1995 * Map control/status registers. 1996 */ 1997 1998 #ifdef TL_USEIOSPACE 1999 if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 2000 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) { 2001 if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_IO, 0, 2002 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) { 2003 printf(": can't map i/o space\n"); 2004 return; 2005 } 2006 } 2007 #else 2008 if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 2009 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){ 2010 if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_MEM, 0, 2011 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){ 2012 printf(": can't map mem space\n"); 2013 return; 2014 } 2015 } 2016 #endif 2017 2018 /* 2019 * Manual wants the PCI latency timer jacked up to 0xff 2020 */ 2021 command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER); 2022 command |= 0x0000ff00; 2023 pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command); 2024 2025 /* 2026 * Allocate our interrupt. 
2027 */ 2028 if (pci_intr_map(pa, &ih)) { 2029 printf(": couldn't map interrupt\n"); 2030 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2031 return; 2032 } 2033 intrstr = pci_intr_string(pc, ih); 2034 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc, 2035 self->dv_xname); 2036 if (sc->sc_ih == NULL) { 2037 printf(": could not establish interrupt"); 2038 if (intrstr != NULL) 2039 printf(" at %s", intrstr); 2040 printf("\n"); 2041 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2042 return; 2043 } 2044 printf(": %s", intrstr); 2045 2046 sc->sc_dmat = pa->pa_dmat; 2047 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data), 2048 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 2049 printf("%s: can't alloc list\n", sc->sc_dev.dv_xname); 2050 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2051 return; 2052 } 2053 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data), 2054 &kva, BUS_DMA_NOWAIT)) { 2055 printf("%s: can't map dma buffers (%d bytes)\n", 2056 sc->sc_dev.dv_xname, sizeof(struct tl_list_data)); 2057 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 2058 return; 2059 } 2060 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1, 2061 sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 2062 printf("%s: can't create dma map\n", sc->sc_dev.dv_xname); 2063 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data)); 2064 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 2065 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2066 return; 2067 } 2068 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, 2069 sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) { 2070 printf("%s: can't load dma map\n", sc->sc_dev.dv_xname); 2071 bus_dmamap_destroy(sc->sc_dmat, dmamap); 2072 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data)); 2073 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 2074 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2075 return; 2076 } 2077 sc->tl_ldata = (struct 
tl_list_data *)kva; 2078 2079 for (sc->tl_product = tl_prods; sc->tl_product->tp_vend; 2080 sc->tl_product++) { 2081 if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) && 2082 sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id)) 2083 break; 2084 } 2085 2086 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ || 2087 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) 2088 sc->tl_eeaddr = TL_EEPROM_EADDR; 2089 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) 2090 sc->tl_eeaddr = TL_EEPROM_EADDR_OC; 2091 2092 /* 2093 * Reset adapter. 2094 */ 2095 tl_softreset(sc, 1); 2096 tl_hardreset(self); 2097 DELAY(1000000); 2098 tl_softreset(sc, 1); 2099 2100 /* 2101 * Get station address from the EEPROM. 2102 */ 2103 if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2104 sc->tl_eeaddr, ETHER_ADDR_LEN)) { 2105 printf("\n%s: failed to read station address\n", 2106 sc->sc_dev.dv_xname); 2107 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 2108 return; 2109 } 2110 2111 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) { 2112 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 2113 u_int16_t *p; 2114 2115 p = (u_int16_t *)&sc->arpcom.ac_enaddr[i]; 2116 *p = ntohs(*p); 2117 } 2118 } 2119 2120 printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 2121 2122 ifp = &sc->arpcom.ac_if; 2123 ifp->if_softc = sc; 2124 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2125 ifp->if_ioctl = tl_ioctl; 2126 ifp->if_start = tl_start; 2127 ifp->if_watchdog = tl_watchdog; 2128 ifp->if_baudrate = 10000000; 2129 IFQ_SET_MAXLEN(&ifp->if_snd, TL_TX_LIST_CNT - 1); 2130 IFQ_SET_READY(&ifp->if_snd); 2131 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 2132 2133 /* 2134 * Reset adapter (again). 2135 */ 2136 tl_softreset(sc, 1); 2137 tl_hardreset(self); 2138 DELAY(1000000); 2139 tl_softreset(sc, 1); 2140 2141 /* 2142 * Do MII setup. If no PHYs are found, then this is a 2143 * bitrate ThunderLAN chip that only supports 10baseT 2144 * and AUI/BNC. 
2145 */ 2146 sc->sc_mii.mii_ifp = ifp; 2147 sc->sc_mii.mii_readreg = tl_miibus_readreg; 2148 sc->sc_mii.mii_writereg = tl_miibus_writereg; 2149 sc->sc_mii.mii_statchg = tl_miibus_statchg; 2150 ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts); 2151 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 2152 0); 2153 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2154 struct ifmedia *ifm; 2155 sc->tl_bitrate = 1; 2156 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); 2157 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 2158 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 2159 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 2160 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); 2161 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); 2162 /* Reset again, this time setting bitrate mode. */ 2163 tl_softreset(sc, 1); 2164 ifm = &sc->ifmedia; 2165 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2166 tl_ifmedia_upd(ifp); 2167 } else 2168 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2169 2170 /* 2171 * Attach us everywhere. 2172 */ 2173 if_attach(ifp); 2174 ether_ifattach(ifp); 2175 } 2176 2177 void 2178 tl_wait_up(xsc) 2179 void *xsc; 2180 { 2181 struct tl_softc *sc = xsc; 2182 struct ifnet *ifp = &sc->arpcom.ac_if; 2183 2184 ifp->if_flags |= IFF_RUNNING; 2185 ifp->if_flags &= ~IFF_OACTIVE; 2186 } 2187 2188 struct cfattach tl_ca = { 2189 sizeof(struct tl_softc), tl_probe, tl_attach 2190 }; 2191 2192 struct cfdriver tl_cd = { 2193 NULL, "tl", DV_IFNET 2194 }; 2195