/*	$OpenBSD: if_tl.c,v 1.79 2024/05/24 06:02:57 jsg Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $
 */

/*
 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
 * the National Semiconductor DP83840A physical interface and the
 * Microchip Technology 24Cxx series serial EEPROM.
 *
 * Written using the following four documents:
 *
 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
 * National Semiconductor DP83840A data sheet (www.national.com)
 * Microchip Technology 24C02C data sheet (www.microchip.com)
 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * Some notes about the ThunderLAN:
 *
 * The ThunderLAN controller is a single chip containing PCI controller
 * logic, approximately 3K of on-board SRAM, a LAN controller, and a media
 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
 * to act as a complete ethernet interface.
 *
 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
 * in full or half duplex. Some of the Compaq Deskpro machines use a
 * Level 1 LXT970 PHY with the same capabilities.
 * Certain Olicom adapters use a Micro Linear ML6692 100BaseTX-only PHY,
 * which can be used in concert with the ThunderLAN's internal PHY to
 * provide full 10/100 support. This is cheaper than using a standalone
 * external PHY for both 10/100 modes and letting the ThunderLAN's
 * internal PHY go to waste.
 * A serial EEPROM is also attached to the ThunderLAN chip to provide
 * power-up default register settings and for storing the adapter's
 * station address. Although not supported by this driver, the ThunderLAN
 * chip can also be connected to token ring PHYs.
 *
 * The ThunderLAN has a set of registers which can be used to issue
 * commands, acknowledge interrupts, and manipulate other internal
 * registers on its DIO bus. The primary registers can be accessed
 * using either programmed I/O (inb/outb) or via PCI memory mapping,
 * depending on how the card is configured during the PCI probing
 * phase. It is even possible to have both PIO and memory mapped
 * access turned on at the same time.
 *
 * Frame reception and transmission with the ThunderLAN chip is done
 * using frame 'lists.' A list structure looks more or less like this:
 *
 * struct tl_frag {
 *	u_int32_t fragment_address;
 *	u_int32_t fragment_size;
 * };
 * struct tl_list {
 *	u_int32_t forward_pointer;
 *	u_int16_t cstat;
 *	u_int16_t frame_size;
 *	struct tl_frag fragments[10];
 * };
 *
 * The forward pointer in the list header can be either a 0 or the address
 * of another list, which allows several lists to be linked together. Each
 * list contains up to 10 fragment descriptors. This means the chip allows
 * ethernet frames to be broken up into up to 10 chunks for transfer to
 * and from the SRAM. Note that the forward pointer and fragment buffer
 * addresses are physical memory addresses, not virtual. Note also that
 * a single ethernet frame can not span lists: if the host wants to
 * transmit a frame and the frame data is split up over more than 10
 * buffers, the frame has to be collapsed before it can be transmitted.
 *
 * To receive frames, the driver sets up a number of lists and populates
 * the fragment descriptors, then it sends an RX GO command to the chip.
 * When a frame is received, the chip will DMA it into the memory regions
 * specified by the fragment descriptors and then trigger an RX 'end of
 * frame interrupt' when done. The driver may choose to use only one
 * fragment per list; this may result in slightly less efficient use
 * of memory in exchange for improving performance.
 *
 * To transmit frames, the driver again sets up lists and fragment
 * descriptors, only this time the buffers contain frame data that
 * is to be DMA'ed into the chip instead of out of it. Once the chip
 * has transferred the data into its on-board SRAM, it will trigger a
 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
 * interrupt when it reaches the end of the list.
 */
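/*
 * To make the chaining concrete, a minimal sketch (illustrative only,
 * not lifted from the TI documentation): two lists are linked by
 * storing the physical address of the second in the first's forward
 * pointer, and a forward pointer of 0 terminates the chain:
 *
 *	list_a.forward_pointer = vtophys(&list_b);
 *	list_b.forward_pointer = 0;
 *
 * When the chip walks off the end of such a chain, it raises an
 * 'end of channel' (EOC) interrupt.
 */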
/*
 * Some notes about this driver:
 *
 * The ThunderLAN chip provides a couple of different ways to organize
 * reception, transmission and interrupt handling. The simplest approach
 * is to use one list each for transmission and reception. In this mode,
 * the ThunderLAN will generate two interrupts for every received frame
 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
 * TX EOF and one TX EOC). This may make the driver simpler, but it
 * hurts performance to have to handle so many interrupts.
 *
 * Initially I wanted to create a circular list of receive buffers so
 * that the ThunderLAN chip would think there was an infinitely long
 * receive channel and never deliver an RXEOC interrupt. However this
 * doesn't work correctly under heavy load: while the manual says the
 * chip will trigger an RXEOF interrupt each time a frame is copied into
 * memory, you can't count on the chip waiting around for you to acknowledge
 * the interrupt before it starts trying to DMA the next frame. The result
 * is that the chip might traverse the entire circular list and then wrap
 * around before you have a chance to do anything about it. Consequently,
 * the receive list is terminated (with a 0 in the forward pointer in the
 * last element). Each time an RXEOF interrupt arrives, the used list
 * is shifted to the end of the list. This gives the appearance of an
 * infinitely large RX chain so long as the driver doesn't fall behind
 * the chip and allow all of the lists to be filled up.
 *
 * If all the lists are filled, the adapter will deliver an RX 'end of
 * channel' interrupt when it hits the 0 forward pointer at the end of
 * the chain. The RXEOC handler then cleans out the RX chain and resets
 * the list head pointer in the ch_parm register and restarts the receiver.
 *
 * For frame transmission, it is possible to program the ThunderLAN's
 * transmit interrupt threshold so that the chip can acknowledge multiple
 * lists with only a single TX EOF interrupt. This allows the driver to
 * queue several frames in one shot, and only have to handle a total of
 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
 * are transmitted. Frame transmission is done directly out of the
 * mbufs passed to the tl_start() routine via the interface send queue.
 * The driver simply sets up the fragment descriptors in the transmit
 * lists to point to the mbuf data regions and sends a TX GO command.
 *
 * Note that since the RX and TX lists themselves are always used
 * only by the driver, they are malloc()ed once at driver initialization
 * time and never free()ed.
 *
 * Also, in order to remain as platform independent as possible, this
 * driver uses memory mapped register access to manipulate the card
 * as opposed to programmed I/O. This avoids the use of the inb/outb
 * (and related) instructions which are specific to the i386 platform.
 *
 * Using these techniques, this driver achieves very high performance
 * by minimizing the number of interrupts generated during large
 * transfers and by completely avoiding buffer copies. Frame transfer
 * to and from the ThunderLAN chip is performed entirely by the chip
 * itself, thereby reducing the load on the host CPU.
 */
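/*
 * A minimal sketch of the RXEOF recycling step described above, using
 * the names from tl_intvec_rxeof() below ('tail' is the last list in
 * the chain, 'cur' the list that just completed and has been re-armed
 * with a fresh mbuf):
 *
 *	tail->tl_ptr->tlist_fptr = VTOPHYS(cur->tl_ptr);
 *	tail->tl_next = cur;
 *	tail = cur;
 *
 * i.e. the completed list is appended at the tail, so the chip always
 * sees more lists ahead of it as long as we keep up.
 */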
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>		/* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/*
 * Default to using PIO register access mode to pacify certain
 * laptop docking stations with built-in ThunderLAN chips that
 * don't seem to handle memory mapped mode properly.
 */
#define TL_USEIOSPACE

#include <dev/pci/if_tlreg.h>
#include <dev/mii/tlphyvar.h>

const struct tl_products tl_prods[] = {
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_INTNF3P, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_INTPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNET100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 },
	{ 0, 0, 0 }
};

int tl_probe(struct device *, void *, void *);
void tl_attach(struct device *, struct device *, void *);
void tl_wait_up(void *);
int tl_intvec_rxeoc(void *, u_int32_t);
int tl_intvec_txeoc(void *, u_int32_t);
int tl_intvec_txeof(void *, u_int32_t);
int tl_intvec_rxeof(void *, u_int32_t);
int tl_intvec_adchk(void *, u_int32_t);
int tl_intvec_netsts(void *, u_int32_t);

int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
void tl_stats_update(void *);
int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);

int tl_intr(void *);
void tl_start(struct ifnet *);
int tl_ioctl(struct ifnet *, u_long, caddr_t);
void tl_init(void *);
void tl_stop(struct tl_softc *);
void tl_watchdog(struct ifnet *);
int tl_ifmedia_upd(struct ifnet *);
void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);

void tl_mii_sync(struct tl_softc *);
void tl_mii_send(struct tl_softc *, u_int32_t, int);
int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
int tl_miibus_readreg(struct device *, int, int);
void tl_miibus_writereg(struct device *, int, int, int);
void tl_miibus_statchg(struct device *);

void tl_setmode(struct tl_softc *, uint64_t);
int tl_calchash(u_int8_t *);
void tl_iff(struct tl_softc *);
void tl_setfilt(struct tl_softc *, caddr_t, int);
void tl_softreset(struct tl_softc *, int);
void tl_hardreset(struct device *);
int tl_list_rx_init(struct tl_softc *);
int tl_list_tx_init(struct tl_softc *);

u_int8_t tl_dio_read8(struct tl_softc *, int);
u_int16_t tl_dio_read16(struct tl_softc *, int);
u_int32_t tl_dio_read32(struct tl_softc *, int);
void tl_dio_write8(struct tl_softc *, int, int);
void tl_dio_write16(struct tl_softc *, int, int);
void tl_dio_write32(struct tl_softc *, int, int);
void tl_dio_setbit(struct tl_softc *, int, int);
void tl_dio_clrbit(struct tl_softc *, int, int);
void tl_dio_setbit16(struct tl_softc *, int, int);
void tl_dio_clrbit16(struct tl_softc *, int, int);

u_int8_t
tl_dio_read8(struct tl_softc *sc, int reg)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
}

u_int16_t
tl_dio_read16(struct tl_softc *sc, int reg)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
}

u_int32_t
tl_dio_read32(struct tl_softc *sc, int reg)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
}

void
tl_dio_write8(struct tl_softc *sc, int reg, int val)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
}

void
tl_dio_write16(struct tl_softc *sc, int reg, int val)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
}

void
tl_dio_write32(struct tl_softc *sc, int reg, int val)
{
	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
}

void
tl_dio_setbit(struct tl_softc *sc, int reg, int bit)
{
	u_int8_t f;

	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
	f |= bit;
	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
}

void
tl_dio_clrbit(struct tl_softc *sc, int reg, int bit)
{
	u_int8_t f;

	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
	f &= ~bit;
	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
}

void
tl_dio_setbit16(struct tl_softc *sc, int reg, int bit)
{
	u_int16_t f;

	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
	f |= bit;
	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
}

void
tl_dio_clrbit16(struct tl_softc *sc, int reg, int bit)
{
	u_int16_t f;

	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
	f &= ~bit;
	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
}

/*
 * Send an instruction or address to the EEPROM, check for ACK.
 */
u_int8_t
tl_eeprom_putbyte(struct tl_softc *sc, int byte)
{
	int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i)
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
		else
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
		DELAY(1);
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
		DELAY(1);
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	}

	/*
	 * Turn off TX mode.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Check for ack.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);

	return(ack);
}
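/*
 * For reference: the 24Cxx parts speak an I2C-style serial protocol
 * which this driver bit-bangs through the EDATA/ECLOK bits of the
 * NetSio register. A random read, as implemented by tl_eeprom_getbyte()
 * below, is the sequence:
 *
 *	START, write control, address, STOP, START, read control, data, STOP
 *
 * with tl_eeprom_putbyte() above checking for an ACK after each byte
 * shifted out.
 */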
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 */
u_int8_t
tl_eeprom_getbyte(struct tl_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int8_t byte = 0;

	tl_dio_write8(sc, TL_NETSIO, 0);

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		printf("%s: failed to send write command, status: %x\n",
		    sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
		return(1);
	}

	/*
	 * Send address of byte we want to read.
	 */
	if (tl_eeprom_putbyte(sc, addr)) {
		printf("%s: failed to send address, status: %x\n",
		    sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
		return(1);
	}

	EEPROM_STOP;
	EEPROM_START;

	/*
	 * Send read control code to EEPROM.
	 */
	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		printf("%s: failed to send read command, status: %x\n",
		    sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
		return(1);
	}

	/*
	 * Start reading bits from EEPROM.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
	for (i = 0x80; i; i >>= 1) {
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
		DELAY(1);
		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
			byte |= i;
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */
	*dest = byte;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
tl_read_eeprom(struct tl_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = tl_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

void
tl_mii_sync(struct tl_softc *sc)
{
	int i;

	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	for (i = 0; i < 32; i++) {
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}
}

void
tl_mii_send(struct tl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (bits & i)
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
		else
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}
}
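/*
 * For reference: the management frames shifted out below follow the
 * standard IEEE 802.3 MII format, i.e. a 32-bit preamble (tl_mii_sync()
 * above), a 2-bit start delimiter, a 2-bit opcode, a 5-bit PHY address,
 * a 5-bit register address, a 2-bit turnaround and 16 bits of data,
 * all bit-banged through the MDATA/MCLK bits of the NetSio register.
 */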
int
tl_mii_readreg(struct tl_softc *sc, struct tl_mii_frame *frame)
{
	int i, ack, s;
	int minten = 0;

	s = splnet();

	tl_mii_sync(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten)
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	/*
	 * Turn on data xmit.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Idle bit */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Check for ack */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;

	/* Complete the cycle */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (!ack) {
			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
				frame->mii_data |= i;
		}
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}

fail:

	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Reenable interrupts */
	if (minten)
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
int
tl_mii_writereg(struct tl_softc *sc, struct tl_mii_frame *frame)
{
	int s;
	int minten;

	tl_mii_sync(sc);

	s = splnet();

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_WRITEOP;
	frame->mii_turnaround = TL_MII_TURNAROUND;

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten)
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	/*
	 * Turn on data output.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);
	tl_mii_send(sc, frame->mii_turnaround, 2);
	tl_mii_send(sc, frame->mii_data, 16);

	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Turn off xmit.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Reenable interrupts */
	if (minten)
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	splx(s);

	return(0);
}

int
tl_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct tl_softc *sc = (struct tl_softc *)dev;
	struct tl_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	tl_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
tl_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct tl_softc *sc = (struct tl_softc *)dev;
	struct tl_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	tl_mii_writereg(sc, &frame);
}

void
tl_miibus_statchg(struct device *dev)
{
	struct tl_softc *sc = (struct tl_softc *)dev;

	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
}

/*
 * Set modes for bitrate devices.
 */
void
tl_setmode(struct tl_softc *sc, uint64_t media)
{
	if (IFM_SUBTYPE(media) == IFM_10_5)
		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
		if ((media & IFM_GMASK) == IFM_FDX) {
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
		} else {
			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
		}
	}
}

/*
 * Calculate the hash of a MAC address for programming the multicast hash
 * table. This hash is simply the address split into 6-bit chunks
 * XOR'd, e.g.
 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
 * Bytes 0-2 and 3-5 are symmetrical, so they are folded together. Then
 * the folded 24-bit value is split into 6-bit portions and XOR'd.
 */
int
tl_calchash(u_int8_t *addr)
{
	int t;

	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
	    (addr[2] ^ addr[5]);
	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
}
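/*
 * A worked example (our own, not from the manual): for the address
 * 33:33:00:00:00:01 the fold gives t = 0x333301; the 6-bit chunks are
 * 0x0c, 0x33, 0x0c and 0x01, and XOR'ing them yields hash bit 0x32.
 */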
/*
 * The ThunderLAN has a perfect MAC address filter in addition to
 * the multicast hash filter. The perfect filter can be programmed
 * with up to four MAC addresses. The first one is always used to
 * hold the station address, which leaves us free to use the other
 * three for multicast addresses.
 */
void
tl_setfilt(struct tl_softc *sc, caddr_t addr, int slot)
{
	int i;
	u_int16_t regaddr;

	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		tl_dio_write8(sc, regaddr + i, *(addr + i));
}

/*
 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
 * linked list. This is fine, except addresses are added from the head
 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
 * group to always be in the perfect filter, but as more groups are added,
 * the 224.0.0.1 entry (which is always added first) gets pushed down
 * the list and ends up at the tail. So after 3 or 4 multicast groups
 * are added, the all-hosts entry gets pushed out of the perfect filter
 * and into the hash table.
 *
 * Because the multicast list is a doubly-linked list as opposed to a
 * circular queue, we don't have the ability to just grab the tail of
 * the list and traverse it backwards. Instead, we have to traverse
 * the list once to find the tail, then traverse it again backwards to
 * update the multicast filter.
 */
void
tl_iff(struct tl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multistep step;
	struct ether_multi *enm;
	u_int32_t hashes[2];
	int h = 0;

	tl_dio_clrbit(sc, TL_NETCMD, (TL_CMD_CAF | TL_CMD_NOBRX));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
		else
			hashes[0] = hashes[1] = 0xffffffff;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = tl_calchash(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	tl_dio_write32(sc, TL_HASH1, hashes[0]);
	tl_dio_write32(sc, TL_HASH2, hashes[1]);
}

/*
 * This routine is recommended by the ThunderLAN manual to ensure that
 * the internal PHY is powered up correctly. It also recommends a one
 * second pause at the end to 'wait for the clocks to start' but in my
 * experience this isn't necessary.
 */
void
tl_hardreset(struct device *dev)
{
	struct tl_softc *sc = (struct tl_softc *)dev;
	int i;
	u_int16_t flags;

	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;

	for (i = 0; i < MII_NPHY; i++)
		tl_miibus_writereg(dev, i, MII_BMCR, flags);

	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
	tl_mii_sync(sc);
	while (tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET)
		;

	DELAY(5000);
}
void
tl_softreset(struct tl_softc *sc, int internal)
{
	u_int32_t cmd, dummy, i;

	/* Assert the adapter reset bit. */
	CMD_SET(sc, TL_CMD_ADRST);

	/* Turn off interrupts */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/* First, clear the stats registers. */
	for (i = 0; i < 5; i++)
		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);

	/* Clear Areg and Hash registers */
	for (i = 0; i < 8; i++)
		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	if (internal && !sc->tl_bitrate) {
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	} else {
		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	}

	/* Handle cards with bitrate devices. */
	if (sc->tl_bitrate)
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = CSR_READ_4(sc, TL_HOSTCMD);
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));

	/* Unreset the MII */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);

	/* Take the adapter out of reset */
	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);
}

/*
 * Initialize the transmit lists.
 */
int
tl_list_tx_init(struct tl_softc *sc)
{
	struct tl_chain_data *cd;
	struct tl_list_data *ld;
	int i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
		if (i == (TL_TX_LIST_CNT - 1))
			cd->tl_tx_chain[i].tl_next = NULL;
		else
			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
	}

	cd->tl_tx_free = &cd->tl_tx_chain[0];
	cd->tl_tx_tail = cd->tl_tx_head = NULL;
	sc->tl_txeoc = 1;

	return(0);
}

/*
 * Initialize the RX lists and allocate mbufs for them.
 */
int
tl_list_rx_init(struct tl_softc *sc)
{
	struct tl_chain_data *cd;
	struct tl_list_data *ld;
	int i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;

	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		cd->tl_rx_chain[i].tl_ptr =
		    (struct tl_list_onefrag *)&ld->tl_rx_list[i];
		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		if (i == (TL_RX_LIST_CNT - 1)) {
			cd->tl_rx_chain[i].tl_next = NULL;
			ld->tl_rx_list[i].tlist_fptr = 0;
		} else {
			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
			ld->tl_rx_list[i].tlist_fptr =
			    VTOPHYS(&ld->tl_rx_list[i + 1]);
		}
	}

	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];

	return(0);
}

int
tl_newbuf(struct tl_softc *sc, struct tl_chain_onefrag *c)
{
	struct mbuf *m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return(ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		m_freem(m_new);
		return(ENOBUFS);
	}

#ifdef __alpha__
	m_new->m_data += 2;
#endif

	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}
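/*
 * A note on the __alpha__ case in tl_newbuf() above (our explanation,
 * not from the original author): offsetting m_data by two bytes makes
 * the IP header, which follows the 14-byte ethernet header, land on a
 * 32-bit boundary, which strict-alignment platforms require.
 */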
/*
 * Interrupt handler for RX 'end of frame' condition (EOF). This
 * tells us that a full ethernet frame has been captured and we need
 * to handle it.
 *
 * Reception is done using 'lists' which consist of a header and a
 * series of 10 data count/data address pairs that point to buffers.
 * Initially you're supposed to create a list, populate it with pointers
 * to buffers, then load the physical address of the list into the
 * ch_parm register. The adapter is then supposed to DMA the received
 * frame into the buffers for you.
 *
 * To make things as fast as possible, we have the chip DMA directly
 * into mbufs. This saves us from having to do a buffer copy: we can
 * just hand the mbufs directly to the network stack. Once the frame
 * has been sent on its way, the 'list' structure is assigned a new
 * buffer and moved to the end of the RX chain. As long as we stay
 * ahead of the chip, it will always think it has an endless receive
 * channel.
 *
 * If we happen to fall behind and the chip manages to fill up all of
 * the buffers, it will generate an end of channel interrupt and wait
 * for us to empty the chain and restart the receiver.
 */
int
tl_intvec_rxeof(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	int r = 0, total_len = 0;
	struct ether_header *eh;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	struct tl_chain_onefrag *cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	while (sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    VTOPHYS(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain). In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
int
tl_intvec_rxeoc(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	int r;
	struct tl_chain_data *cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
int
tl_intvec_txeof(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	int r = 0;
	struct tl_chain *cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}
/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
 * You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
 * it could attempt to issue a transmit command prematurely.
 *
 * To guard against this, tl_start() will only issue transmit commands
 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
 * can set this flag once tl_start() has cleared it.
 */
int
tl_intvec_txeoc(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	u_int32_t cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    VTOPHYS(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		return(0);
	}

	return(1);
}

int
tl_intvec_adchk(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;

	sc = xsc;

	if (type)
		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
		    (unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}

int
tl_intvec_netsts(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	u_int16_t netsts;

	sc = xsc;

	netsts = tl_dio_read16(sc, TL_NETSTS);
	tl_dio_write16(sc, TL_NETSTS, netsts);

	printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);

	return(1);
}

int
tl_intr(void *xsc)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	int r = 0;
	u_int32_t type = 0;
	u_int16_t ints = 0;
	u_int8_t ivec = 0;

	sc = xsc;

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch (ints) {
	case (TL_INTR_INVALID):
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
		break;
	}

	/* Re-enable interrupts */
	if (r)
		CMD_PUT(sc, TL_CMD_ACK | r | type);

	if (!ifq_empty(&ifp->if_snd))
		tl_start(ifp);

	return r;
}
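/*
 * Note on the TX threshold adjustment in tl_stats_update() below: the
 * threshold field in the ACommit register is stored shifted left by
 * four bits (hence the shuffling), and it is bumped one step in each
 * interval in which a TX underrun was seen, until it reaches
 * TL_AC_TXTHRESH_WHOLEPKT; this trades a little transmit latency for
 * fewer underruns.
 */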
void
tl_stats_update(void *xsc)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	struct tl_stats tl_stats;
	u_int32_t *p;
	int s;

	s = splnet();

	bzero(&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t tx_thresh;

		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	timeout_add_sec(&sc->tl_stats_tmo, 1);

	if (!sc->tl_bitrate)
		mii_tick(&sc->sc_mii);

	splx(s);
}

/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
tl_encap(struct tl_softc *sc, struct tl_chain *c, struct mbuf *m_head)
{
	int frag = 0;
	struct tl_frag *f = NULL;
	int total_len;
	struct mbuf *m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len += m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
			    VTOPHYS(mtod(m, vaddr_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = VTOPHYS(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}
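/*
 * For illustration (hypothetical numbers): an mbuf chain holding
 * 14 + 20 + 1466 bytes maps straight onto three fragment descriptors
 * with a tlist_frsize of 1500; only a chain with more than TL_MAXFRAGS
 * non-empty mbufs is coalesced into a single cluster by the special
 * case #1 code above.
 */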
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
tl_start(struct ifnet *ifp)
{
	struct tl_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t cmd;
	struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifq_set_oactive(&ifp->if_snd);
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while (sc->tl_cdata.tl_tx_free != NULL) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = VTOPHYS(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 10;
}
void
tl_init(void *xsc)
{
	struct tl_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Program promiscuous mode and multicast filters. */
	tl_iff(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		tl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate)
		mii_mediachg(&sc->sc_mii);
	else
		tl_ifmedia_upd(ifp);

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	splx(s);

	/* Start the stats update counter */
	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
	timeout_add_sec(&sc->tl_stats_tmo, 1);
	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
	timeout_add_sec(&sc->tl_wait_tmo, 2);
}

/*
 * Set media options.
 */
int
tl_ifmedia_upd(struct ifnet *ifp)
{
	struct tl_softc *sc = ifp->if_softc;

	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else
		mii_mediachg(&sc->sc_mii);

	return(0);
}

/*
 * Report current media status.
 */
void
tl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tl_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;

	ifmr->ifm_active = IFM_ETHER;
	if (sc->tl_bitrate) {
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	} else {
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}

int
tl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			tl_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				tl_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			tl_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

void
tl_watchdog(struct ifnet *ifp)
{
	struct tl_softc *sc;

	sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	ifp->if_oerrors++;

	tl_softreset(sc, 1);
	tl_init(sc);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
tl_stop(struct tl_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	timeout_del(&sc->tl_stats_tmo);
	timeout_del(&sc->tl_wait_tmo);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero(&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero(&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list));

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

int
tl_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) {
		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN)
			return 1;
		return 0;
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_COMPAQ_N100TX:
		case PCI_PRODUCT_COMPAQ_N10T:
		case PCI_PRODUCT_COMPAQ_INTNF3P:
		case PCI_PRODUCT_COMPAQ_DPNET100TX:
		case PCI_PRODUCT_COMPAQ_INTPL100TX:
		case PCI_PRODUCT_COMPAQ_DP4000:
		case PCI_PRODUCT_COMPAQ_N10T2:
		case PCI_PRODUCT_COMPAQ_N10_TX_UTP:
		case PCI_PRODUCT_COMPAQ_NF3P:
		case PCI_PRODUCT_COMPAQ_NF3P_BNC:
			return 1;
		}
		return 0;
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_OLICOM_OC2183:
		case PCI_PRODUCT_OLICOM_OC2325:
		case PCI_PRODUCT_OLICOM_OC2326:
			return 1;
		}
		return 0;
	}

	return 0;
}
1830 */ 1831 1832 #ifdef TL_USEIOSPACE 1833 if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 1834 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) { 1835 if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_IO, 0, 1836 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) { 1837 printf(": can't map i/o space\n"); 1838 return; 1839 } 1840 } 1841 #else 1842 if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 1843 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){ 1844 if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_MEM, 0, 1845 &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){ 1846 printf(": can't map mem space\n"); 1847 return; 1848 } 1849 } 1850 #endif 1851 1852 /* 1853 * Manual wants the PCI latency timer jacked up to 0xff 1854 */ 1855 command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER); 1856 command |= 0x0000ff00; 1857 pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command); 1858 1859 /* 1860 * Allocate our interrupt. 1861 */ 1862 if (pci_intr_map(pa, &ih)) { 1863 printf(": couldn't map interrupt\n"); 1864 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1865 return; 1866 } 1867 intrstr = pci_intr_string(pc, ih); 1868 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc, 1869 self->dv_xname); 1870 if (sc->sc_ih == NULL) { 1871 printf(": could not establish interrupt"); 1872 if (intrstr != NULL) 1873 printf(" at %s", intrstr); 1874 printf("\n"); 1875 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1876 return; 1877 } 1878 printf(": %s", intrstr); 1879 1880 sc->sc_dmat = pa->pa_dmat; 1881 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data), 1882 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1883 printf("%s: can't alloc list\n", sc->sc_dev.dv_xname); 1884 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1885 return; 1886 } 1887 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data), 1888 &kva, BUS_DMA_NOWAIT)) { 1889 printf("%s: can't map dma buffers (%zd bytes)\n", 1890 sc->sc_dev.dv_xname, sizeof(struct tl_list_data)); 1891 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1892 return; 1893 } 1894 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1, 1895 sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 1896 printf("%s: can't create dma map\n", sc->sc_dev.dv_xname); 1897 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data)); 1898 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1899 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1900 return; 1901 } 1902 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, 1903 sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) { 1904 printf("%s: can't load dma map\n", sc->sc_dev.dv_xname); 1905 bus_dmamap_destroy(sc->sc_dmat, dmamap); 1906 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data)); 1907 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1908 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1909 return; 1910 } 1911 sc->tl_ldata = (struct tl_list_data *)kva; 1912 1913 for (sc->tl_product = tl_prods; sc->tl_product->tp_vend; 1914 sc->tl_product++) { 1915 if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) && 1916 sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id)) 1917 break; 1918 } 1919 1920 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ || 1921 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) 1922 sc->tl_eeaddr = TL_EEPROM_EADDR; 1923 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) 1924 sc->tl_eeaddr = TL_EEPROM_EADDR_OC; 1925 1926 /* 1927 * Reset adapter. 
1928 */ 1929 tl_softreset(sc, 1); 1930 tl_hardreset(self); 1931 DELAY(1000000); 1932 tl_softreset(sc, 1); 1933 1934 /* 1935 * Get station address from the EEPROM. 1936 */ 1937 if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1938 sc->tl_eeaddr, ETHER_ADDR_LEN)) { 1939 printf("\n%s: failed to read station address\n", 1940 sc->sc_dev.dv_xname); 1941 bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize); 1942 return; 1943 } 1944 1945 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) { 1946 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 1947 u_int16_t *p; 1948 1949 p = (u_int16_t *)&sc->arpcom.ac_enaddr[i]; 1950 *p = ntohs(*p); 1951 } 1952 } 1953 1954 printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 1955 1956 ifp = &sc->arpcom.ac_if; 1957 ifp->if_softc = sc; 1958 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1959 ifp->if_ioctl = tl_ioctl; 1960 ifp->if_start = tl_start; 1961 ifp->if_watchdog = tl_watchdog; 1962 ifq_init_maxlen(&ifp->if_snd, TL_TX_LIST_CNT - 1); 1963 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1964 1965 ifp->if_capabilities = IFCAP_VLAN_MTU; 1966 1967 /* 1968 * Reset adapter (again). 1969 */ 1970 tl_softreset(sc, 1); 1971 tl_hardreset(self); 1972 DELAY(1000000); 1973 tl_softreset(sc, 1); 1974 1975 /* 1976 * Do MII setup. If no PHYs are found, then this is a 1977 * bitrate ThunderLAN chip that only supports 10baseT 1978 * and AUI/BNC. 1979 */ 1980 sc->sc_mii.mii_ifp = ifp; 1981 sc->sc_mii.mii_readreg = tl_miibus_readreg; 1982 sc->sc_mii.mii_writereg = tl_miibus_writereg; 1983 sc->sc_mii.mii_statchg = tl_miibus_statchg; 1984 ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts); 1985 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 1986 0); 1987 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1988 struct ifmedia *ifm; 1989 sc->tl_bitrate = 1; 1990 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); 1991 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1992 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 1993 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1994 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); 1995 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); 1996 /* Reset again, this time setting bitrate mode. */ 1997 tl_softreset(sc, 1); 1998 ifm = &sc->ifmedia; 1999 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2000 tl_ifmedia_upd(ifp); 2001 } else 2002 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2003 2004 /* 2005 * Attach us everywhere. 2006 */ 2007 if_attach(ifp); 2008 ether_ifattach(ifp); 2009 } 2010 2011 void 2012 tl_wait_up(void *xsc) 2013 { 2014 struct tl_softc *sc = xsc; 2015 struct ifnet *ifp = &sc->arpcom.ac_if; 2016 2017 ifp->if_flags |= IFF_RUNNING; 2018 ifq_clr_oactive(&ifp->if_snd); 2019 } 2020 2021 const struct cfattach tl_ca = { 2022 sizeof(struct tl_softc), tl_probe, tl_attach 2023 }; 2024 2025 struct cfdriver tl_cd = { 2026 NULL, "tl", DV_IFNET 2027 }; 2028