1 /* $OpenBSD: xl.c,v 1.50 2003/06/29 16:39:02 jason Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $ 35 */ 36 37 /* 38 * 3Com 3c90x Etherlink XL PCI NIC driver 39 * 40 * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI 41 * bus-master chips (3c90x cards and embedded controllers) including 42 * the following: 43 * 44 * 3Com 3c900-TPO 10Mbps/RJ-45 45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC 46 * 3Com 3c905-TX 10/100Mbps/RJ-45 47 * 3Com 3c905-T4 10/100Mbps/RJ-45 48 * 3Com 3c900B-TPO 10Mbps/RJ-45 49 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC 50 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC 51 * 3Com 3c900B-FL 10Mbps/Fiber-optic 52 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC 53 * 3Com 3c905B-TX 10/100Mbps/RJ-45 54 * 3Com 3c900-FL/FX 10/100Mbps/Fiber-optic 55 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) 56 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) 57 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 58 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 59 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 60 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) 61 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) 62 * 3Com 3C575TX 10/100Mbps LAN CardBus PC Card 63 * 3Com 3CCFE575BT 10/100Mbps LAN CardBus PC Card 64 * 3Com 3CCFE575CT 10/100Mbps LAN CardBus PC Card 65 * 3Com 3C3FE575CT 10/100Mbps LAN CardBus Type III PC Card 66 * 3Com 3CCFEM656 10/100Mbps LAN+56k Modem CardBus PC Card 67 * 3Com 3CCFEM656B 10/100Mbps LAN+56k Modem CardBus PC Card 68 * 3Com 3CCFEM656C 10/100Mbps LAN+56k Global Modem CardBus PC Card 69 * 3Com 3C3FEM656C 10/100Mbps LAN+56k Global Modem CardBus Type III PC Card 70 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) 71 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 72 * Dell on-board 3c920 10/100Mbps/RJ-45 73 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 74 * Dell Latitude laptop docking station embedded 3c905-TX 75 * 76 * Written by Bill Paul <wpaul@ctr.columbia.edu> 77 * Electrical Engineering Department 78 * Columbia University, New York City 79 */ 80 81 /* 82 * The 3c90x series chips use a bus-master DMA interface for transfering 83 * packets to and from the controller chip. Some of the "vortex" cards 84 * (3c59x) also supported a bus master mode, however for those chips 85 * you could only DMA packets to/from a contiguous memory buffer. For 86 * transmission this would mean copying the contents of the queued mbuf 87 * chain into a an mbuf cluster and then DMAing the cluster. This extra 88 * copy would sort of defeat the purpose of the bus master support for 89 * any packet that doesn't fit into a single mbuf. 90 * 91 * By contrast, the 3c90x cards support a fragment-based bus master 92 * mode where mbuf chains can be encapsulated using TX descriptors. 93 * This is similar to other PCI chips such as the Texas Instruments 94 * ThunderLAN and the Intel 82557/82558. 95 * 96 * The "vortex" driver (if_vx.c) happens to work for the "boomerang" 97 * bus master chips because they maintain the old PIO interface for 98 * backwards compatibility, but starting with the 3c905B and the 99 * "cyclone" chips, the compatibility interface has been dropped. 100 * Since using bus master DMA is a big win, we use this driver to 101 * support the PCI "boomerang" chips even though they work with the 102 * "vortex" driver in order to obtain better performance. 103 */ 104 105 #include "bpfilter.h" 106 #include "vlan.h" 107 108 #include <sys/param.h> 109 #include <sys/systm.h> 110 #include <sys/mbuf.h> 111 #include <sys/protosw.h> 112 #include <sys/socket.h> 113 #include <sys/ioctl.h> 114 #include <sys/errno.h> 115 #include <sys/malloc.h> 116 #include <sys/kernel.h> 117 #include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */ 118 #include <sys/device.h> 119 120 #include <net/if.h> 121 #include <net/if_dl.h> 122 #include <net/if_types.h> 123 #include <net/if_media.h> 124 125 #ifdef INET 126 #include <netinet/in.h> 127 #include <netinet/in_systm.h> 128 #include <netinet/in_var.h> 129 #include <netinet/ip.h> 130 #include <netinet/if_ether.h> 131 #endif 132 133 #include <dev/mii/mii.h> 134 #include <dev/mii/miivar.h> 135 136 #include <machine/bus.h> 137 138 #if NBPFILTER > 0 139 #include <net/bpf.h> 140 #endif 141 142 #include <dev/ic/xlreg.h> 143 144 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); 145 void xl_stats_update(void *); 146 int xl_encap(struct xl_softc *, struct xl_chain *, 147 struct mbuf * ); 148 int xl_encap_90xB(struct xl_softc *, struct xl_chain *, 149 struct mbuf * ); 150 void xl_rxeof(struct xl_softc *); 151 int xl_rx_resync(struct xl_softc *); 152 void xl_txeof(struct xl_softc *); 153 void xl_txeof_90xB(struct xl_softc *); 154 void xl_txeoc(struct xl_softc *); 155 int xl_intr(void *); 156 void xl_start(struct ifnet *); 157 void xl_start_90xB(struct ifnet *); 158 int xl_ioctl(struct ifnet *, u_long, caddr_t); 159 void xl_init(void *); 160 void xl_stop(struct xl_softc *); 161 void xl_freetxrx(struct xl_softc *); 162 void xl_watchdog(struct ifnet *); 163 void xl_shutdown(void *); 164 int xl_ifmedia_upd(struct ifnet *); 165 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 166 167 int xl_eeprom_wait(struct xl_softc *); 168 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); 169 void xl_mii_sync(struct xl_softc *); 170 void xl_mii_send(struct xl_softc *, u_int32_t, int); 171 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *); 172 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *); 173 174 void xl_setcfg(struct xl_softc *); 175 void xl_setmode(struct xl_softc *, int); 176 u_int8_t xl_calchash(caddr_t); 177 void xl_setmulti(struct xl_softc *); 178 void xl_setmulti_hash(struct xl_softc *); 179 void xl_reset(struct xl_softc *, int); 180 int xl_list_rx_init(struct xl_softc *); 181 int xl_list_tx_init(struct xl_softc *); 182 int xl_list_tx_init_90xB(struct xl_softc *); 183 void xl_wait(struct xl_softc *); 184 void xl_mediacheck(struct xl_softc *); 185 void xl_choose_xcvr(struct xl_softc *, int); 186 #ifdef notdef 187 void xl_testpacket(struct xl_softc *); 188 #endif 189 190 int xl_miibus_readreg(struct device *, int, int); 191 void xl_miibus_writereg(struct device *, int, int, int); 192 void xl_miibus_statchg(struct device *); 193 194 void xl_power(int, void *); 195 196 void 197 xl_power(why, arg) 198 int why; 199 void *arg; 200 { 201 struct xl_softc *sc = arg; 202 struct ifnet *ifp; 203 int s; 204 205 s = splimp(); 206 if (why != PWR_RESUME) 207 xl_stop(sc); 208 else { 209 ifp = &sc->sc_arpcom.ac_if; 210 if (ifp->if_flags & IFF_UP) { 211 xl_reset(sc, 1); 212 xl_init(sc); 213 } 214 } 215 splx(s); 216 } 217 218 /* 219 * Murphy's law says that it's possible the chip can wedge and 220 * the 'command in progress' bit may never clear. Hence, we wait 221 * only a finite amount of time to avoid getting caught in an 222 * infinite loop. Normally this delay routine would be a macro, 223 * but it isn't called during normal operation so we can afford 224 * to make it a function. 225 */ 226 void xl_wait(sc) 227 struct xl_softc *sc; 228 { 229 register int i; 230 231 for (i = 0; i < XL_TIMEOUT; i++) { 232 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) 233 break; 234 } 235 236 #ifdef DIAGNOSTIC 237 if (i == XL_TIMEOUT) 238 printf("%s: command never completed!\n", sc->sc_dev.dv_xname); 239 #endif 240 241 return; 242 } 243 244 /* 245 * MII access routines are provided for adapters with external 246 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in 247 * autoneg logic that's faked up to look like a PHY (3c905B-TX). 248 * Note: if you don't perform the MDIO operations just right, 249 * it's possible to end up with code that works correctly with 250 * some chips/CPUs/processor speeds/bus speeds/etc but not 251 * with others. 252 */ 253 #define MII_SET(x) \ 254 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ 255 CSR_READ_2(sc, XL_W4_PHY_MGMT) | x) 256 257 #define MII_CLR(x) \ 258 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ 259 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~x) 260 261 /* 262 * Sync the PHYs by setting data bit and strobing the clock 32 times. 263 */ 264 void xl_mii_sync(sc) 265 struct xl_softc *sc; 266 { 267 register int i; 268 269 XL_SEL_WIN(4); 270 MII_SET(XL_MII_DIR|XL_MII_DATA); 271 272 for (i = 0; i < 32; i++) { 273 MII_SET(XL_MII_CLK); 274 DELAY(1); 275 MII_CLR(XL_MII_CLK); 276 DELAY(1); 277 } 278 279 return; 280 } 281 282 /* 283 * Clock a series of bits through the MII. 284 */ 285 void xl_mii_send(sc, bits, cnt) 286 struct xl_softc *sc; 287 u_int32_t bits; 288 int cnt; 289 { 290 int i; 291 292 XL_SEL_WIN(4); 293 MII_CLR(XL_MII_CLK); 294 295 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 296 if (bits & i) { 297 MII_SET(XL_MII_DATA); 298 } else { 299 MII_CLR(XL_MII_DATA); 300 } 301 DELAY(1); 302 MII_CLR(XL_MII_CLK); 303 DELAY(1); 304 MII_SET(XL_MII_CLK); 305 } 306 } 307 308 /* 309 * Read an PHY register through the MII. 310 */ 311 int xl_mii_readreg(sc, frame) 312 struct xl_softc *sc; 313 struct xl_mii_frame *frame; 314 315 { 316 int i, ack, s; 317 318 s = splimp(); 319 320 /* 321 * Set up frame for RX. 322 */ 323 frame->mii_stdelim = XL_MII_STARTDELIM; 324 frame->mii_opcode = XL_MII_READOP; 325 frame->mii_turnaround = 0; 326 frame->mii_data = 0; 327 328 /* 329 * Select register window 4. 330 */ 331 332 XL_SEL_WIN(4); 333 334 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0); 335 /* 336 * Turn on data xmit. 337 */ 338 MII_SET(XL_MII_DIR); 339 340 xl_mii_sync(sc); 341 342 /* 343 * Send command/address info. 344 */ 345 xl_mii_send(sc, frame->mii_stdelim, 2); 346 xl_mii_send(sc, frame->mii_opcode, 2); 347 xl_mii_send(sc, frame->mii_phyaddr, 5); 348 xl_mii_send(sc, frame->mii_regaddr, 5); 349 350 /* Idle bit */ 351 MII_CLR((XL_MII_CLK|XL_MII_DATA)); 352 DELAY(1); 353 MII_SET(XL_MII_CLK); 354 DELAY(1); 355 356 /* Turn off xmit. */ 357 MII_CLR(XL_MII_DIR); 358 359 /* Check for ack */ 360 MII_CLR(XL_MII_CLK); 361 DELAY(1); 362 MII_SET(XL_MII_CLK); 363 DELAY(1); 364 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA; 365 366 /* 367 * Now try reading data bits. If the ack failed, we still 368 * need to clock through 16 cycles to keep the PHY(s) in sync. 369 */ 370 if (ack) { 371 for(i = 0; i < 16; i++) { 372 MII_CLR(XL_MII_CLK); 373 DELAY(1); 374 MII_SET(XL_MII_CLK); 375 DELAY(1); 376 } 377 goto fail; 378 } 379 380 for (i = 0x8000; i; i >>= 1) { 381 MII_CLR(XL_MII_CLK); 382 DELAY(1); 383 if (!ack) { 384 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA) 385 frame->mii_data |= i; 386 DELAY(1); 387 } 388 MII_SET(XL_MII_CLK); 389 DELAY(1); 390 } 391 392 fail: 393 394 MII_CLR(XL_MII_CLK); 395 DELAY(1); 396 MII_SET(XL_MII_CLK); 397 DELAY(1); 398 399 splx(s); 400 401 if (ack) 402 return(1); 403 return(0); 404 } 405 406 /* 407 * Write to a PHY register through the MII. 408 */ 409 int xl_mii_writereg(sc, frame) 410 struct xl_softc *sc; 411 struct xl_mii_frame *frame; 412 413 { 414 int s; 415 416 s = splimp(); 417 /* 418 * Set up frame for TX. 419 */ 420 421 frame->mii_stdelim = XL_MII_STARTDELIM; 422 frame->mii_opcode = XL_MII_WRITEOP; 423 frame->mii_turnaround = XL_MII_TURNAROUND; 424 425 /* 426 * Select the window 4. 427 */ 428 XL_SEL_WIN(4); 429 430 /* 431 * Turn on data output. 432 */ 433 MII_SET(XL_MII_DIR); 434 435 xl_mii_sync(sc); 436 437 xl_mii_send(sc, frame->mii_stdelim, 2); 438 xl_mii_send(sc, frame->mii_opcode, 2); 439 xl_mii_send(sc, frame->mii_phyaddr, 5); 440 xl_mii_send(sc, frame->mii_regaddr, 5); 441 xl_mii_send(sc, frame->mii_turnaround, 2); 442 xl_mii_send(sc, frame->mii_data, 16); 443 444 /* Idle bit. */ 445 MII_SET(XL_MII_CLK); 446 DELAY(1); 447 MII_CLR(XL_MII_CLK); 448 DELAY(1); 449 450 /* 451 * Turn off xmit. 452 */ 453 MII_CLR(XL_MII_DIR); 454 455 splx(s); 456 457 return(0); 458 } 459 460 int 461 xl_miibus_readreg(self, phy, reg) 462 struct device *self; 463 int phy, reg; 464 { 465 struct xl_softc *sc = (struct xl_softc *)self; 466 struct xl_mii_frame frame; 467 468 if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24) 469 return (0); 470 471 bzero((char *)&frame, sizeof(frame)); 472 473 frame.mii_phyaddr = phy; 474 frame.mii_regaddr = reg; 475 xl_mii_readreg(sc, &frame); 476 477 return(frame.mii_data); 478 } 479 480 void 481 xl_miibus_writereg(self, phy, reg, data) 482 struct device *self; 483 int phy, reg, data; 484 { 485 struct xl_softc *sc = (struct xl_softc *)self; 486 struct xl_mii_frame frame; 487 488 if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24) 489 return; 490 491 bzero((char *)&frame, sizeof(frame)); 492 493 frame.mii_phyaddr = phy; 494 frame.mii_regaddr = reg; 495 frame.mii_data = data; 496 497 xl_mii_writereg(sc, &frame); 498 } 499 500 void 501 xl_miibus_statchg(self) 502 struct device *self; 503 { 504 struct xl_softc *sc = (struct xl_softc *)self; 505 506 xl_setcfg(sc); 507 508 XL_SEL_WIN(3); 509 if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) 510 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); 511 else 512 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, 513 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); 514 } 515 516 /* 517 * The EEPROM is slow: give it time to come ready after issuing 518 * it a command. 519 */ 520 int xl_eeprom_wait(sc) 521 struct xl_softc *sc; 522 { 523 int i; 524 525 for (i = 0; i < 100; i++) { 526 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) 527 DELAY(162); 528 else 529 break; 530 } 531 532 if (i == 100) { 533 printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname); 534 return(1); 535 } 536 537 return(0); 538 } 539 540 /* 541 * Read a sequence of words from the EEPROM. Note that ethernet address 542 * data is stored in the EEPROM in network byte order. 543 */ 544 int xl_read_eeprom(sc, dest, off, cnt, swap) 545 struct xl_softc *sc; 546 caddr_t dest; 547 int off; 548 int cnt; 549 int swap; 550 { 551 int err = 0, i; 552 u_int16_t word = 0, *ptr; 553 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) 554 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) 555 /* WARNING! DANGER! 556 * It's easy to accidentally overwrite the rom content! 557 * Note: the 3c575 uses 8bit EEPROM offsets. 558 */ 559 XL_SEL_WIN(0); 560 561 if (xl_eeprom_wait(sc)) 562 return(1); 563 564 if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) 565 off += 0x30; 566 567 for (i = 0; i < cnt; i++) { 568 if (sc->xl_flags & XL_FLAG_8BITROM) 569 CSR_WRITE_2(sc, XL_W0_EE_CMD, 570 XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); 571 else 572 CSR_WRITE_2(sc, XL_W0_EE_CMD, 573 XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); 574 err = xl_eeprom_wait(sc); 575 if (err) 576 break; 577 word = CSR_READ_2(sc, XL_W0_EE_DATA); 578 ptr = (u_int16_t *)(dest + (i * 2)); 579 if (swap) 580 *ptr = ntohs(word); 581 else 582 *ptr = word; 583 } 584 585 return(err ? 1 : 0); 586 } 587 588 /* 589 * This routine is taken from the 3Com Etherlink XL manual, 590 * page 10-7. It calculates a CRC of the supplied multicast 591 * group address and returns the lower 8 bits, which are used 592 * as the multicast filter position. 593 * Note: the 3c905B currently only supports a 64-bit hash table, 594 * which means we really only need 6 bits, but the manual indicates 595 * that future chip revisions will have a 256-bit hash table, 596 * hence the routine is set up to calculate 8 bits of position 597 * info in case we need it some day. 598 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a 599 * 256 bit hash table. This means we have to use all 8 bits regardless. 600 * On older cards, the upper 2 bits will be ignored. Grrrr.... 601 */ 602 u_int8_t xl_calchash(addr) 603 caddr_t addr; 604 { 605 u_int32_t crc, carry; 606 int i, j; 607 u_int8_t c; 608 609 /* Compute CRC for the address value. */ 610 crc = 0xFFFFFFFF; /* initial value */ 611 612 for (i = 0; i < 6; i++) { 613 c = *(addr + i); 614 for (j = 0; j < 8; j++) { 615 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 616 crc <<= 1; 617 c >>= 1; 618 if (carry) 619 crc = (crc ^ 0x04c11db6) | carry; 620 } 621 } 622 623 /* return the filter bit position */ 624 return(crc & 0x000000FF); 625 } 626 627 /* 628 * NICs older than the 3c905B have only one multicast option, which 629 * is to enable reception of all multicast frames. 630 */ 631 void xl_setmulti(sc) 632 struct xl_softc *sc; 633 { 634 struct ifnet *ifp; 635 struct arpcom *ac = &sc->sc_arpcom; 636 u_int8_t rxfilt; 637 638 ifp = &sc->sc_arpcom.ac_if; 639 640 XL_SEL_WIN(5); 641 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 642 643 if (ifp->if_flags & IFF_ALLMULTI) { 644 rxfilt |= XL_RXFILTER_ALLMULTI; 645 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 646 return; 647 } 648 649 if (ac->ac_multicnt > 0) 650 rxfilt |= XL_RXFILTER_ALLMULTI; 651 else 652 rxfilt &= ~XL_RXFILTER_ALLMULTI; 653 654 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 655 656 return; 657 } 658 659 /* 660 * 3c905B adapters have a hash filter that we can program. 661 */ 662 void xl_setmulti_hash(sc) 663 struct xl_softc *sc; 664 { 665 struct ifnet *ifp; 666 int h = 0, i; 667 struct arpcom *ac = &sc->sc_arpcom; 668 struct ether_multi *enm; 669 struct ether_multistep step; 670 u_int8_t rxfilt; 671 int mcnt = 0; 672 673 ifp = &sc->sc_arpcom.ac_if; 674 675 XL_SEL_WIN(5); 676 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 677 678 if (ifp->if_flags & IFF_ALLMULTI) { 679 allmulti: 680 rxfilt |= XL_RXFILTER_ALLMULTI; 681 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 682 return; 683 } else 684 rxfilt &= ~XL_RXFILTER_ALLMULTI; 685 686 687 /* first, zot all the existing hash bits */ 688 for (i = 0; i < XL_HASHFILT_SIZE; i++) 689 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i); 690 691 /* now program new ones */ 692 ETHER_FIRST_MULTI(step, ac, enm); 693 while (enm != NULL) { 694 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 695 ifp->if_flags |= IFF_ALLMULTI; 696 goto allmulti; 697 } 698 h = xl_calchash(enm->enm_addrlo); 699 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h); 700 mcnt++; 701 ETHER_NEXT_MULTI(step, enm); 702 } 703 704 if (mcnt) 705 rxfilt |= XL_RXFILTER_MULTIHASH; 706 else 707 rxfilt &= ~XL_RXFILTER_MULTIHASH; 708 709 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 710 711 return; 712 } 713 714 #ifdef notdef 715 void xl_testpacket(sc) 716 struct xl_softc *sc; 717 { 718 struct mbuf *m; 719 struct ifnet *ifp; 720 int error; 721 722 ifp = &sc->sc_arpcom.ac_if; 723 724 MGETHDR(m, M_DONTWAIT, MT_DATA); 725 726 if (m == NULL) 727 return; 728 729 bcopy(&sc->sc_arpcom.ac_enaddr, 730 mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN); 731 bcopy(&sc->sc_arpcom.ac_enaddr, 732 mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN); 733 mtod(m, struct ether_header *)->ether_type = htons(3); 734 mtod(m, unsigned char *)[14] = 0; 735 mtod(m, unsigned char *)[15] = 0; 736 mtod(m, unsigned char *)[16] = 0xE3; 737 m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; 738 IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error); 739 xl_start(ifp); 740 741 return; 742 } 743 #endif 744 745 void xl_setcfg(sc) 746 struct xl_softc *sc; 747 { 748 u_int32_t icfg; 749 750 XL_SEL_WIN(3); 751 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); 752 icfg &= ~XL_ICFG_CONNECTOR_MASK; 753 if (sc->xl_media & XL_MEDIAOPT_MII || 754 sc->xl_media & XL_MEDIAOPT_BT4) 755 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); 756 if (sc->xl_media & XL_MEDIAOPT_BTX) 757 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); 758 759 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); 760 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 761 } 762 763 void xl_setmode(sc, media) 764 struct xl_softc *sc; 765 int media; 766 { 767 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 768 u_int32_t icfg; 769 u_int16_t mediastat; 770 771 XL_SEL_WIN(4); 772 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); 773 XL_SEL_WIN(3); 774 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); 775 776 if (sc->xl_media & XL_MEDIAOPT_BT) { 777 if (IFM_SUBTYPE(media) == IFM_10_T) { 778 ifp->if_baudrate = IF_Mbps(10); 779 sc->xl_xcvr = XL_XCVR_10BT; 780 icfg &= ~XL_ICFG_CONNECTOR_MASK; 781 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); 782 mediastat |= XL_MEDIASTAT_LINKBEAT| 783 XL_MEDIASTAT_JABGUARD; 784 mediastat &= ~XL_MEDIASTAT_SQEENB; 785 } 786 } 787 788 if (sc->xl_media & XL_MEDIAOPT_BFX) { 789 if (IFM_SUBTYPE(media) == IFM_100_FX) { 790 ifp->if_baudrate = IF_Mbps(100); 791 sc->xl_xcvr = XL_XCVR_100BFX; 792 icfg &= ~XL_ICFG_CONNECTOR_MASK; 793 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); 794 mediastat |= XL_MEDIASTAT_LINKBEAT; 795 mediastat &= ~XL_MEDIASTAT_SQEENB; 796 } 797 } 798 799 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { 800 if (IFM_SUBTYPE(media) == IFM_10_5) { 801 ifp->if_baudrate = IF_Mbps(10); 802 sc->xl_xcvr = XL_XCVR_AUI; 803 icfg &= ~XL_ICFG_CONNECTOR_MASK; 804 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); 805 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 806 XL_MEDIASTAT_JABGUARD); 807 mediastat |= ~XL_MEDIASTAT_SQEENB; 808 } 809 if (IFM_SUBTYPE(media) == IFM_10_FL) { 810 ifp->if_baudrate = IF_Mbps(10); 811 sc->xl_xcvr = XL_XCVR_AUI; 812 icfg &= ~XL_ICFG_CONNECTOR_MASK; 813 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); 814 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 815 XL_MEDIASTAT_JABGUARD); 816 mediastat |= ~XL_MEDIASTAT_SQEENB; 817 } 818 } 819 820 if (sc->xl_media & XL_MEDIAOPT_BNC) { 821 if (IFM_SUBTYPE(media) == IFM_10_2) { 822 ifp->if_baudrate = IF_Mbps(10); 823 sc->xl_xcvr = XL_XCVR_COAX; 824 icfg &= ~XL_ICFG_CONNECTOR_MASK; 825 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); 826 mediastat &= ~(XL_MEDIASTAT_LINKBEAT| 827 XL_MEDIASTAT_JABGUARD| 828 XL_MEDIASTAT_SQEENB); 829 } 830 } 831 832 if ((media & IFM_GMASK) == IFM_FDX || 833 IFM_SUBTYPE(media) == IFM_100_FX) { 834 XL_SEL_WIN(3); 835 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); 836 } else { 837 XL_SEL_WIN(3); 838 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, 839 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); 840 } 841 842 if (IFM_SUBTYPE(media) == IFM_10_2) 843 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); 844 else 845 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 846 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); 847 XL_SEL_WIN(4); 848 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); 849 DELAY(800); 850 XL_SEL_WIN(7); 851 } 852 853 void xl_reset(sc, hard) 854 struct xl_softc *sc; 855 int hard; 856 { 857 register int i; 858 859 XL_SEL_WIN(0); 860 if (hard || (sc->xl_flags & XL_FLAG_WEIRDRESET)) { 861 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | 862 ((sc->xl_flags & XL_FLAG_WEIRDRESET)?0xFF:0)); 863 } 864 else 865 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | 0x0010); 866 xl_wait(sc); 867 868 for (i = 0; i < XL_TIMEOUT; i++) { 869 DELAY(10); 870 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) 871 break; 872 } 873 874 DELAY(100000); 875 876 /* Reset TX and RX. */ 877 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 878 xl_wait(sc); 879 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 880 xl_wait(sc); 881 882 if (sc->xl_flags & XL_FLAG_WEIRDRESET) { 883 XL_SEL_WIN(2); 884 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, 885 XL_W2_RESET_OPTIONS) | 0x4010); 886 } 887 888 /* Wait a little while for the chip to get its brains in order. */ 889 DELAY(100000); 890 return; 891 } 892 893 /* 894 * This routine is a kludge to work around possible hardware faults 895 * or manufacturing defects that can cause the media options register 896 * (or reset options register, as it's called for the first generation 897 * 3c90x adapters) to return an incorrect result. I have encountered 898 * one Dell Latitude laptop docking station with an integrated 3c905-TX 899 * which doesn't have any of the 'mediaopt' bits set. This screws up 900 * the attach routine pretty badly because it doesn't know what media 901 * to look for. If we find ourselves in this predicament, this routine 902 * will try to guess the media options values and warn the user of a 903 * possible manufacturing defect with his adapter/system/whatever. 904 */ 905 void xl_mediacheck(sc) 906 struct xl_softc *sc; 907 { 908 /* 909 * If some of the media options bits are set, assume they are 910 * correct. If not, try to figure it out down below. 911 * XXX I should check for 10baseFL, but I don't have an adapter 912 * to test with. 913 */ 914 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { 915 /* 916 * Check the XCVR value. If it's not in the normal range 917 * of values, we need to fake it up here. 918 */ 919 if (sc->xl_xcvr <= XL_XCVR_AUTO) 920 return; 921 else { 922 printf("%s: bogus xcvr value " 923 "in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr); 924 printf("%s: choosing new default based " 925 "on card type\n", sc->sc_dev.dv_xname); 926 } 927 } else { 928 if (sc->xl_type == XL_TYPE_905B && 929 sc->xl_media & XL_MEDIAOPT_10FL) 930 return; 931 printf("%s: WARNING: no media options bits set in " 932 "the media options register!!\n", sc->sc_dev.dv_xname); 933 printf("%s: this could be a manufacturing defect in " 934 "your adapter or system\n", sc->sc_dev.dv_xname); 935 printf("%s: attempting to guess media type; you " 936 "should probably consult your vendor\n", sc->sc_dev.dv_xname); 937 } 938 939 xl_choose_xcvr(sc, 1); 940 } 941 942 void xl_choose_xcvr(sc, verbose) 943 struct xl_softc *sc; 944 int verbose; 945 { 946 u_int16_t devid; 947 948 /* 949 * Read the device ID from the EEPROM. 950 * This is what's loaded into the PCI device ID register, so it has 951 * to be correct otherwise we wouldn't have gotten this far. 952 */ 953 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); 954 955 switch(devid) { 956 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ 957 case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ 958 sc->xl_media = XL_MEDIAOPT_BT; 959 sc->xl_xcvr = XL_XCVR_10BT; 960 if (verbose) 961 printf("%s: guessing 10BaseT transceiver\n", 962 sc->sc_dev.dv_xname); 963 break; 964 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ 965 case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ 966 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; 967 sc->xl_xcvr = XL_XCVR_10BT; 968 if (verbose) 969 printf("%s: guessing COMBO (AUI/BNC/TP)\n", 970 sc->sc_dev.dv_xname); 971 break; 972 case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ 973 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; 974 sc->xl_xcvr = XL_XCVR_10BT; 975 if (verbose) 976 printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname); 977 break; 978 case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ 979 sc->xl_media = XL_MEDIAOPT_10FL; 980 sc->xl_xcvr = XL_XCVR_AUI; 981 if (verbose) 982 printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname); 983 break; 984 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ 985 case TC_DEVICEID_HURRICANE_555: /* 3c555 */ 986 case TC_DEVICEID_HURRICANE_556: /* 3c556 */ 987 case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ 988 sc->xl_media = XL_MEDIAOPT_MII; 989 sc->xl_xcvr = XL_XCVR_MII; 990 if (verbose) 991 printf("%s: guessing MII\n", sc->sc_dev.dv_xname); 992 break; 993 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ 994 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ 995 sc->xl_media = XL_MEDIAOPT_BT4; 996 sc->xl_xcvr = XL_XCVR_MII; 997 if (verbose) 998 printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname); 999 break; 1000 case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ 1001 case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */ 1002 case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ 1003 case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ 1004 case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ 1005 case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ 1006 sc->xl_media = XL_MEDIAOPT_BTX; 1007 sc->xl_xcvr = XL_XCVR_AUTO; 1008 if (verbose) 1009 printf("%s: guessing 10/100 internal\n", 1010 sc->sc_dev.dv_xname); 1011 break; 1012 case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ 1013 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; 1014 sc->xl_xcvr = XL_XCVR_AUTO; 1015 if (verbose) 1016 printf("%s: guessing 10/100 plus BNC/AUI\n", 1017 sc->sc_dev.dv_xname); 1018 break; 1019 case TC_DEVICEID_3C575_CARDBUS: 1020 case TC_DEVICEID_3CCFE575BT_CARDBUS: 1021 case TC_DEVICEID_3CCFE575CT_CARDBUS: 1022 case TC_DEVICEID_3CCFEM656_CARDBUS: 1023 case TC_DEVICEID_3CCFEM656B_CARDBUS: 1024 case TC_DEVICEID_3CCFEM656C_CARDBUS: 1025 sc->xl_media = XL_MEDIAOPT_MII; 1026 sc->xl_xcvr = XL_XCVR_MII; 1027 break; 1028 default: 1029 printf("%s: unknown device ID: %x -- " 1030 "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid); 1031 sc->xl_media = XL_MEDIAOPT_BT; 1032 break; 1033 } 1034 1035 return; 1036 } 1037 1038 /* 1039 * Initialize the transmit descriptors. 1040 */ 1041 int xl_list_tx_init(sc) 1042 struct xl_softc *sc; 1043 { 1044 struct xl_chain_data *cd; 1045 struct xl_list_data *ld; 1046 int i; 1047 1048 cd = &sc->xl_cdata; 1049 ld = sc->xl_ldata; 1050 for (i = 0; i < XL_TX_LIST_CNT; i++) { 1051 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; 1052 if (i == (XL_TX_LIST_CNT - 1)) 1053 cd->xl_tx_chain[i].xl_next = NULL; 1054 else 1055 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; 1056 } 1057 1058 cd->xl_tx_free = &cd->xl_tx_chain[0]; 1059 cd->xl_tx_tail = cd->xl_tx_head = NULL; 1060 1061 return(0); 1062 } 1063 1064 /* 1065 * Initialize the transmit descriptors. 1066 */ 1067 int 1068 xl_list_tx_init_90xB(sc) 1069 struct xl_softc *sc; 1070 { 1071 struct xl_chain_data *cd; 1072 struct xl_list_data *ld; 1073 int i; 1074 1075 cd = &sc->xl_cdata; 1076 ld = sc->xl_ldata; 1077 for (i = 0; i < XL_TX_LIST_CNT; i++) { 1078 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; 1079 cd->xl_tx_chain[i].xl_phys = 1080 sc->sc_listmap->dm_segs[0].ds_addr + 1081 offsetof(struct xl_list_data, xl_tx_list[i]); 1082 if (i == (XL_TX_LIST_CNT - 1)) 1083 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; 1084 else 1085 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; 1086 if (i == 0) 1087 cd->xl_tx_chain[i].xl_prev = 1088 &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; 1089 else 1090 cd->xl_tx_chain[i].xl_prev = 1091 &cd->xl_tx_chain[i - 1]; 1092 } 1093 1094 bzero((char *)ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT); 1095 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); 1096 1097 cd->xl_tx_prod = 1; 1098 cd->xl_tx_cons = 1; 1099 cd->xl_tx_cnt = 0; 1100 1101 return (0); 1102 } 1103 1104 /* 1105 * Initialize the RX descriptors and allocate mbufs for them. Note that 1106 * we arrange the descriptors in a closed ring, so that the last descriptor 1107 * points back to the first. 1108 */ 1109 int xl_list_rx_init(sc) 1110 struct xl_softc *sc; 1111 { 1112 struct xl_chain_data *cd; 1113 struct xl_list_data *ld; 1114 int i; 1115 bus_addr_t next; 1116 1117 cd = &sc->xl_cdata; 1118 ld = sc->xl_ldata; 1119 1120 for (i = 0; i < XL_RX_LIST_CNT; i++) { 1121 cd->xl_rx_chain[i].xl_ptr = 1122 (struct xl_list_onefrag *)&ld->xl_rx_list[i]; 1123 if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS) 1124 return(ENOBUFS); 1125 next = sc->sc_listmap->dm_segs[0].ds_addr; 1126 if (i == (XL_RX_LIST_CNT - 1)) { 1127 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0]; 1128 next += 1129 offsetof(struct xl_list_data, xl_rx_list[0]); 1130 } else { 1131 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1]; 1132 next += 1133 offsetof(struct xl_list_data, xl_rx_list[i + 1]); 1134 } 1135 ld->xl_rx_list[i].xl_next = htole32(next); 1136 } 1137 1138 cd->xl_rx_head = &cd->xl_rx_chain[0]; 1139 1140 return(0); 1141 } 1142 1143 /* 1144 * Initialize an RX descriptor and attach an MBUF cluster. 1145 */ 1146 int xl_newbuf(sc, c) 1147 struct xl_softc *sc; 1148 struct xl_chain_onefrag *c; 1149 { 1150 struct mbuf *m_new = NULL; 1151 bus_dmamap_t map; 1152 1153 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1154 if (m_new == NULL) 1155 return(ENOBUFS); 1156 1157 MCLGET(m_new, M_DONTWAIT); 1158 if (!(m_new->m_flags & M_EXT)) { 1159 m_freem(m_new); 1160 return(ENOBUFS); 1161 } 1162 1163 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1164 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap, 1165 mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) { 1166 printf("%s: rx load failed\n", sc->sc_dev.dv_xname); 1167 m_freem(m_new); 1168 return (ENOBUFS); 1169 } 1170 1171 /* sync the old map, and unload it (if necessary) */ 1172 if (c->map->dm_nsegs != 0) { 1173 bus_dmamap_sync(sc->sc_dmat, c->map, 1174 0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1175 bus_dmamap_unload(sc->sc_dmat, c->map); 1176 } 1177 1178 map = c->map; 1179 c->map = sc->sc_rx_sparemap; 1180 sc->sc_rx_sparemap = map; 1181 1182 /* Force longword alignment for packet payload. */ 1183 m_adj(m_new, ETHER_ALIGN); 1184 1185 bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize, 1186 BUS_DMASYNC_PREREAD); 1187 1188 c->xl_mbuf = m_new; 1189 c->xl_ptr->xl_frag.xl_addr = 1190 htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN); 1191 c->xl_ptr->xl_frag.xl_len = 1192 htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG); 1193 c->xl_ptr->xl_status = htole32(0); 1194 1195 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1196 ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list), 1197 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1198 1199 return(0); 1200 } 1201 1202 int xl_rx_resync(sc) 1203 struct xl_softc *sc; 1204 { 1205 struct xl_chain_onefrag *pos; 1206 int i; 1207 1208 pos = sc->xl_cdata.xl_rx_head; 1209 1210 for (i = 0; i < XL_RX_LIST_CNT; i++) { 1211 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1212 ((caddr_t)pos->xl_ptr - sc->sc_listkva), 1213 sizeof(struct xl_list), 1214 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1215 1216 if (pos->xl_ptr->xl_status) 1217 break; 1218 pos = pos->xl_next; 1219 } 1220 1221 if (i == XL_RX_LIST_CNT) 1222 return (0); 1223 1224 sc->xl_cdata.xl_rx_head = pos; 1225 1226 return (EAGAIN); 1227 } 1228 1229 /* 1230 * A frame has been uploaded: pass the resulting mbuf chain up to 1231 * the higher level protocols. 1232 */ 1233 void xl_rxeof(sc) 1234 struct xl_softc *sc; 1235 { 1236 struct mbuf *m; 1237 struct ifnet *ifp; 1238 struct xl_chain_onefrag *cur_rx; 1239 int total_len = 0, sumflags = 0; 1240 u_int32_t rxstat; 1241 1242 ifp = &sc->sc_arpcom.ac_if; 1243 1244 again: 1245 1246 while ((rxstat = letoh32(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) 1247 != 0) { 1248 cur_rx = sc->xl_cdata.xl_rx_head; 1249 sc->xl_cdata.xl_rx_head = cur_rx->xl_next; 1250 1251 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1252 ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva), 1253 sizeof(struct xl_list), 1254 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1255 1256 /* 1257 * If an error occurs, update stats, clear the 1258 * status word and leave the mbuf cluster in place: 1259 * it should simply get re-used next time this descriptor 1260 * comes up in the ring. 1261 */ 1262 if (rxstat & XL_RXSTAT_UP_ERROR) { 1263 ifp->if_ierrors++; 1264 cur_rx->xl_ptr->xl_status = htole32(0); 1265 continue; 1266 } 1267 1268 /* 1269 * If there error bit was not set, the upload complete 1270 * bit should be set which means we have a valid packet. 1271 * If not, something truly strange has happened. 1272 */ 1273 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { 1274 printf("%s: bad receive status -- " 1275 "packet dropped", sc->sc_dev.dv_xname); 1276 ifp->if_ierrors++; 1277 cur_rx->xl_ptr->xl_status = htole32(0); 1278 continue; 1279 } 1280 1281 /* No errors; receive the packet. */ 1282 m = cur_rx->xl_mbuf; 1283 total_len = letoh32(cur_rx->xl_ptr->xl_status) & 1284 XL_RXSTAT_LENMASK; 1285 1286 /* 1287 * Try to conjure up a new mbuf cluster. If that 1288 * fails, it means we have an out of memory condition and 1289 * should leave the buffer in place and continue. This will 1290 * result in a lost packet, but there's little else we 1291 * can do in this situation. 1292 */ 1293 if (xl_newbuf(sc, cur_rx) == ENOBUFS) { 1294 ifp->if_ierrors++; 1295 cur_rx->xl_ptr->xl_status = htole32(0); 1296 continue; 1297 } 1298 1299 ifp->if_ipackets++; 1300 m->m_pkthdr.rcvif = ifp; 1301 m->m_pkthdr.len = m->m_len = total_len; 1302 #if NBPFILTER > 0 1303 /* 1304 * Handle BPF listeners. Let the BPF user see the packet. 1305 */ 1306 if (ifp->if_bpf) { 1307 bpf_mtap(ifp->if_bpf, m); 1308 } 1309 #endif 1310 1311 if (sc->xl_type == XL_TYPE_905B) { 1312 if (rxstat & XL_RXSTAT_IPCKERR) 1313 sumflags |= M_IPV4_CSUM_IN_BAD; 1314 else if (rxstat & XL_RXSTAT_IPCKOK) 1315 sumflags |= M_IPV4_CSUM_IN_OK; 1316 1317 if (rxstat & XL_RXSTAT_TCPCKERR) 1318 sumflags |= M_TCP_CSUM_IN_BAD; 1319 else if (rxstat & XL_RXSTAT_TCPCKOK) 1320 sumflags |= M_TCP_CSUM_IN_OK; 1321 1322 if (rxstat & XL_RXSTAT_UDPCKERR) 1323 sumflags |= M_UDP_CSUM_IN_BAD; 1324 else if (rxstat & XL_RXSTAT_UDPCKOK) 1325 sumflags |= M_UDP_CSUM_IN_OK; 1326 1327 m->m_pkthdr.csum = sumflags; 1328 } 1329 ether_input_mbuf(ifp, m); 1330 } 1331 1332 /* 1333 * Handle the 'end of channel' condition. When the upload 1334 * engine hits the end of the RX ring, it will stall. This 1335 * is our cue to flush the RX ring, reload the uplist pointer 1336 * register and unstall the engine. 1337 * XXX This is actually a little goofy. With the ThunderLAN 1338 * chip, you get an interrupt when the receiver hits the end 1339 * of the receive ring, which tells you exactly when you 1340 * you need to reload the ring pointer. Here we have to 1341 * fake it. I'm mad at myself for not being clever enough 1342 * to avoid the use of a goto here. 1343 */ 1344 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || 1345 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { 1346 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); 1347 xl_wait(sc); 1348 CSR_WRITE_4(sc, XL_UPLIST_PTR, 1349 sc->sc_listmap->dm_segs[0].ds_addr + 1350 offsetof(struct xl_list_data, xl_rx_list[0])); 1351 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; 1352 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); 1353 goto again; 1354 } 1355 1356 return; 1357 } 1358 1359 /* 1360 * A frame was downloaded to the chip. It's safe for us to clean up 1361 * the list buffers. 1362 */ 1363 void xl_txeof(sc) 1364 struct xl_softc *sc; 1365 { 1366 struct xl_chain *cur_tx; 1367 struct ifnet *ifp; 1368 1369 ifp = &sc->sc_arpcom.ac_if; 1370 1371 /* Clear the timeout timer. */ 1372 ifp->if_timer = 0; 1373 1374 /* 1375 * Go through our tx list and free mbufs for those 1376 * frames that have been uploaded. Note: the 3c905B 1377 * sets a special bit in the status word to let us 1378 * know that a frame has been downloaded, but the 1379 * original 3c900/3c905 adapters don't do that. 1380 * Consequently, we have to use a different test if 1381 * xl_type != XL_TYPE_905B. 1382 */ 1383 while(sc->xl_cdata.xl_tx_head != NULL) { 1384 cur_tx = sc->xl_cdata.xl_tx_head; 1385 1386 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1387 ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva), 1388 sizeof(struct xl_list), 1389 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1390 1391 if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) 1392 break; 1393 1394 sc->xl_cdata.xl_tx_head = cur_tx->xl_next; 1395 ifp->if_opackets++; 1396 if (cur_tx->map->dm_nsegs != 0) { 1397 bus_dmamap_t map = cur_tx->map; 1398 1399 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1400 BUS_DMASYNC_POSTWRITE); 1401 bus_dmamap_unload(sc->sc_dmat, map); 1402 } 1403 if (cur_tx->xl_mbuf != NULL) { 1404 m_freem(cur_tx->xl_mbuf); 1405 cur_tx->xl_mbuf = NULL; 1406 } 1407 cur_tx->xl_next = sc->xl_cdata.xl_tx_free; 1408 sc->xl_cdata.xl_tx_free = cur_tx; 1409 } 1410 1411 if (sc->xl_cdata.xl_tx_head == NULL) { 1412 ifp->if_flags &= ~IFF_OACTIVE; 1413 sc->xl_cdata.xl_tx_tail = NULL; 1414 } else { 1415 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || 1416 !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { 1417 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1418 sc->sc_listmap->dm_segs[0].ds_addr + 1419 ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr - 1420 sc->sc_listkva)); 1421 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1422 } 1423 } 1424 1425 return; 1426 } 1427 1428 void 1429 xl_txeof_90xB(sc) 1430 struct xl_softc *sc; 1431 { 1432 struct xl_chain *cur_tx = NULL; 1433 struct ifnet *ifp; 1434 int idx; 1435 1436 ifp = &sc->sc_arpcom.ac_if; 1437 1438 idx = sc->xl_cdata.xl_tx_cons; 1439 while(idx != sc->xl_cdata.xl_tx_prod) { 1440 1441 cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1442 1443 if ((cur_tx->xl_ptr->xl_status & 1444 htole32(XL_TXSTAT_DL_COMPLETE)) == 0) 1445 break; 1446 1447 if (cur_tx->xl_mbuf != NULL) { 1448 m_freem(cur_tx->xl_mbuf); 1449 cur_tx->xl_mbuf = NULL; 1450 } 1451 1452 if (cur_tx->map->dm_nsegs != 0) { 1453 bus_dmamap_sync(sc->sc_dmat, cur_tx->map, 1454 0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1455 bus_dmamap_unload(sc->sc_dmat, cur_tx->map); 1456 } 1457 1458 ifp->if_opackets++; 1459 1460 sc->xl_cdata.xl_tx_cnt--; 1461 XL_INC(idx, XL_TX_LIST_CNT); 1462 ifp->if_timer = 0; 1463 } 1464 1465 sc->xl_cdata.xl_tx_cons = idx; 1466 1467 if (cur_tx != NULL) 1468 ifp->if_flags &= ~IFF_OACTIVE; 1469 } 1470 1471 /* 1472 * TX 'end of channel' interrupt handler. Actually, we should 1473 * only get a 'TX complete' interrupt if there's a transmit error, 1474 * so this is really TX error handler. 1475 */ 1476 void xl_txeoc(sc) 1477 struct xl_softc *sc; 1478 { 1479 u_int8_t txstat; 1480 1481 while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { 1482 if (txstat & XL_TXSTATUS_UNDERRUN || 1483 txstat & XL_TXSTATUS_JABBER || 1484 txstat & XL_TXSTATUS_RECLAIM) { 1485 if (txstat != 0x90) { 1486 printf("%s: transmission error: %x\n", 1487 sc->sc_dev.dv_xname, txstat); 1488 } 1489 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 1490 xl_wait(sc); 1491 if (sc->xl_type == XL_TYPE_905B) { 1492 int i; 1493 struct xl_chain *c; 1494 i = sc->xl_cdata.xl_tx_cons; 1495 c = &sc->xl_cdata.xl_tx_chain[i]; 1496 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); 1497 CSR_WRITE_1(sc, XL_DOWN_POLL, 64); 1498 } else { 1499 if (sc->xl_cdata.xl_tx_head != NULL) 1500 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1501 sc->sc_listmap->dm_segs[0].ds_addr + 1502 ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr - 1503 sc->sc_listkva)); 1504 } 1505 /* 1506 * Remember to set this for the 1507 * first generation 3c90X chips. 1508 */ 1509 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); 1510 if (txstat & XL_TXSTATUS_UNDERRUN && 1511 sc->xl_tx_thresh < XL_PACKET_SIZE) { 1512 sc->xl_tx_thresh += XL_MIN_FRAMELEN; 1513 #ifdef notdef 1514 printf("%s: tx underrun, increasing tx start" 1515 " threshold to %d\n", sc->sc_dev.dv_xname, 1516 sc->xl_tx_thresh); 1517 #endif 1518 } 1519 CSR_WRITE_2(sc, XL_COMMAND, 1520 XL_CMD_TX_SET_START|sc->xl_tx_thresh); 1521 if (sc->xl_type == XL_TYPE_905B) { 1522 CSR_WRITE_2(sc, XL_COMMAND, 1523 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); 1524 } 1525 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 1526 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1527 } else { 1528 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 1529 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1530 } 1531 /* 1532 * Write an arbitrary byte to the TX_STATUS register 1533 * to clear this interrupt/error and advance to the next. 1534 */ 1535 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); 1536 } 1537 1538 return; 1539 } 1540 1541 int xl_intr(arg) 1542 void *arg; 1543 { 1544 struct xl_softc *sc; 1545 struct ifnet *ifp; 1546 u_int16_t status; 1547 int claimed = 0; 1548 1549 sc = arg; 1550 ifp = &sc->sc_arpcom.ac_if; 1551 1552 while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS) { 1553 1554 claimed = 1; 1555 1556 CSR_WRITE_2(sc, XL_COMMAND, 1557 XL_CMD_INTR_ACK|(status & XL_INTRS)); 1558 1559 if (sc->intr_ack) 1560 (*sc->intr_ack)(sc); 1561 1562 if (status & XL_STAT_UP_COMPLETE) { 1563 int curpkts; 1564 1565 curpkts = ifp->if_ipackets; 1566 xl_rxeof(sc); 1567 if (curpkts == ifp->if_ipackets) { 1568 while (xl_rx_resync(sc)) 1569 xl_rxeof(sc); 1570 } 1571 } 1572 1573 if (status & XL_STAT_DOWN_COMPLETE) { 1574 if (sc->xl_type == XL_TYPE_905B) 1575 xl_txeof_90xB(sc); 1576 else 1577 xl_txeof(sc); 1578 } 1579 1580 if (status & XL_STAT_TX_COMPLETE) { 1581 ifp->if_oerrors++; 1582 xl_txeoc(sc); 1583 } 1584 1585 if (status & XL_STAT_ADFAIL) { 1586 xl_reset(sc, 0); 1587 xl_init(sc); 1588 } 1589 1590 if (status & XL_STAT_STATSOFLOW) { 1591 sc->xl_stats_no_timeout = 1; 1592 xl_stats_update(sc); 1593 sc->xl_stats_no_timeout = 0; 1594 } 1595 } 1596 1597 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1598 (*ifp->if_start)(ifp); 1599 1600 return (claimed); 1601 } 1602 1603 void xl_stats_update(xsc) 1604 void *xsc; 1605 { 1606 struct xl_softc *sc; 1607 struct ifnet *ifp; 1608 struct xl_stats xl_stats; 1609 u_int8_t *p; 1610 int i; 1611 struct mii_data *mii = NULL; 1612 1613 bzero((char *)&xl_stats, sizeof(struct xl_stats)); 1614 1615 sc = xsc; 1616 ifp = &sc->sc_arpcom.ac_if; 1617 if (sc->xl_hasmii) 1618 mii = &sc->sc_mii; 1619 1620 p = (u_int8_t *)&xl_stats; 1621 1622 /* Read all the stats registers. */ 1623 XL_SEL_WIN(6); 1624 1625 for (i = 0; i < 16; i++) 1626 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); 1627 1628 ifp->if_ierrors += xl_stats.xl_rx_overrun; 1629 1630 ifp->if_collisions += xl_stats.xl_tx_multi_collision + 1631 xl_stats.xl_tx_single_collision + 1632 xl_stats.xl_tx_late_collision; 1633 1634 /* 1635 * Boomerang and cyclone chips have an extra stats counter 1636 * in window 4 (BadSSD). We have to read this too in order 1637 * to clear out all the stats registers and avoid a statsoflow 1638 * interrupt. 1639 */ 1640 XL_SEL_WIN(4); 1641 CSR_READ_1(sc, XL_W4_BADSSD); 1642 1643 if (mii != NULL) 1644 mii_tick(mii); 1645 1646 XL_SEL_WIN(7); 1647 1648 if (!sc->xl_stats_no_timeout) 1649 timeout_add(&sc->xl_stsup_tmo, hz); 1650 1651 return; 1652 } 1653 1654 /* 1655 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1656 * pointers to the fragment pointers. 1657 */ 1658 int xl_encap(sc, c, m_head) 1659 struct xl_softc *sc; 1660 struct xl_chain *c; 1661 struct mbuf *m_head; 1662 { 1663 int frag, total_len; 1664 bus_dmamap_t map; 1665 1666 map = sc->sc_tx_sparemap; 1667 1668 reload: 1669 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 1670 m_head, BUS_DMA_NOWAIT) != 0) 1671 return (ENOBUFS); 1672 1673 /* 1674 * Start packing the mbufs in this chain into 1675 * the fragment pointers. Stop when we run out 1676 * of fragments or hit the end of the mbuf chain. 1677 */ 1678 for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) { 1679 if ((XL_TX_LIST_CNT - (sc->xl_cdata.xl_tx_cnt + frag)) < 3) 1680 return (ENOBUFS); 1681 if (frag == XL_MAXFRAGS) 1682 break; 1683 total_len += map->dm_segs[frag].ds_len; 1684 c->xl_ptr->xl_frag[frag].xl_addr = 1685 htole32(map->dm_segs[frag].ds_addr); 1686 c->xl_ptr->xl_frag[frag].xl_len = 1687 htole32(map->dm_segs[frag].ds_len); 1688 } 1689 1690 /* 1691 * Handle special case: we used up all 63 fragments, 1692 * but we have more mbufs left in the chain. Copy the 1693 * data into an mbuf cluster. Note that we don't 1694 * bother clearing the values in the other fragment 1695 * pointers/counters; it wouldn't gain us anything, 1696 * and would waste cycles. 1697 */ 1698 if (frag != map->dm_nsegs) { 1699 struct mbuf *m_new = NULL; 1700 1701 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1702 if (m_new == NULL) 1703 return(1); 1704 if (m_head->m_pkthdr.len > MHLEN) { 1705 MCLGET(m_new, M_DONTWAIT); 1706 if (!(m_new->m_flags & M_EXT)) { 1707 m_freem(m_new); 1708 return(1); 1709 } 1710 } 1711 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1712 mtod(m_new, caddr_t)); 1713 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1714 m_freem(m_head); 1715 m_head = m_new; 1716 goto reload; 1717 } 1718 1719 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1720 BUS_DMASYNC_PREWRITE); 1721 1722 if (c->map->dm_nsegs != 0) { 1723 bus_dmamap_sync(sc->sc_dmat, c->map, 1724 0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1725 bus_dmamap_unload(sc->sc_dmat, c->map); 1726 } 1727 1728 c->xl_mbuf = m_head; 1729 sc->sc_tx_sparemap = c->map; 1730 c->map = map; 1731 c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG); 1732 c->xl_ptr->xl_status = htole32(total_len); 1733 c->xl_ptr->xl_next = 0; 1734 1735 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1736 offsetof(struct xl_list_data, xl_tx_list[0]), 1737 sizeof(struct xl_list) * XL_TX_LIST_CNT, 1738 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1739 1740 return(0); 1741 } 1742 1743 /* 1744 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1745 * to the mbuf data regions directly in the transmit lists. We also save a 1746 * copy of the pointers since the transmit list fragment pointers are 1747 * physical addresses. 1748 */ 1749 void xl_start(ifp) 1750 struct ifnet *ifp; 1751 { 1752 struct xl_softc *sc; 1753 struct mbuf *m_head = NULL; 1754 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1755 1756 sc = ifp->if_softc; 1757 1758 /* 1759 * Check for an available queue slot. If there are none, 1760 * punt. 1761 */ 1762 if (sc->xl_cdata.xl_tx_free == NULL) { 1763 xl_txeoc(sc); 1764 xl_txeof(sc); 1765 if (sc->xl_cdata.xl_tx_free == NULL) { 1766 ifp->if_flags |= IFF_OACTIVE; 1767 return; 1768 } 1769 } 1770 1771 start_tx = sc->xl_cdata.xl_tx_free; 1772 1773 while(sc->xl_cdata.xl_tx_free != NULL) { 1774 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1775 if (m_head == NULL) 1776 break; 1777 1778 /* Pick a descriptor off the free list. */ 1779 cur_tx = sc->xl_cdata.xl_tx_free; 1780 sc->xl_cdata.xl_tx_free = cur_tx->xl_next; 1781 1782 cur_tx->xl_next = NULL; 1783 1784 /* Pack the data into the descriptor. */ 1785 xl_encap(sc, cur_tx, m_head); 1786 1787 /* Chain it together. */ 1788 if (prev != NULL) { 1789 prev->xl_next = cur_tx; 1790 prev->xl_ptr->xl_next = 1791 sc->sc_listmap->dm_segs[0].ds_addr + 1792 ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva); 1793 1794 } 1795 prev = cur_tx; 1796 1797 #if NBPFILTER > 0 1798 /* 1799 * If there's a BPF listener, bounce a copy of this frame 1800 * to him. 1801 */ 1802 if (ifp->if_bpf) 1803 bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf); 1804 #endif 1805 } 1806 1807 /* 1808 * If there are no packets queued, bail. 1809 */ 1810 if (cur_tx == NULL) 1811 return; 1812 1813 /* 1814 * Place the request for the upload interrupt 1815 * in the last descriptor in the chain. This way, if 1816 * we're chaining several packets at once, we'll only 1817 * get an interupt once for the whole chain rather than 1818 * once for each packet. 1819 */ 1820 cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); 1821 1822 /* 1823 * Queue the packets. If the TX channel is clear, update 1824 * the downlist pointer register. 1825 */ 1826 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); 1827 xl_wait(sc); 1828 1829 if (sc->xl_cdata.xl_tx_head != NULL) { 1830 sc->xl_cdata.xl_tx_tail->xl_next = start_tx; 1831 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = 1832 sc->sc_listmap->dm_segs[0].ds_addr + 1833 ((caddr_t)start_tx->xl_ptr - sc->sc_listkva); 1834 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= 1835 htole32(~XL_TXSTAT_DL_INTR); 1836 sc->xl_cdata.xl_tx_tail = cur_tx; 1837 } else { 1838 sc->xl_cdata.xl_tx_head = start_tx; 1839 sc->xl_cdata.xl_tx_tail = cur_tx; 1840 } 1841 if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) 1842 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 1843 sc->sc_listmap->dm_segs[0].ds_addr + 1844 ((caddr_t)start_tx->xl_ptr - sc->sc_listkva)); 1845 1846 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 1847 1848 XL_SEL_WIN(7); 1849 1850 /* 1851 * Set a timeout in case the chip goes out to lunch. 1852 */ 1853 ifp->if_timer = 5; 1854 1855 /* 1856 * XXX Under certain conditions, usually on slower machines 1857 * where interrupts may be dropped, it's possible for the 1858 * adapter to chew up all the buffers in the receive ring 1859 * and stall, without us being able to do anything about it. 1860 * To guard against this, we need to make a pass over the 1861 * RX queue to make sure there aren't any packets pending. 1862 * Doing it here means we can flush the receive ring at the 1863 * same time the chip is DMAing the transmit descriptors we 1864 * just gave it. 1865 * 1866 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) 1867 * nature of their chips in all their marketing literature; 1868 * we may as well take advantage of it. :) 1869 */ 1870 xl_rxeof(sc); 1871 1872 return; 1873 } 1874 1875 int xl_encap_90xB(sc, c, m_head) 1876 struct xl_softc *sc; 1877 struct xl_chain *c; 1878 struct mbuf *m_head; 1879 { 1880 struct xl_frag *f = NULL; 1881 struct xl_list *d; 1882 int frag; 1883 bus_dmamap_t map; 1884 1885 /* 1886 * Start packing the mbufs in this chain into 1887 * the fragment pointers. Stop when we run out 1888 * of fragments or hit the end of the mbuf chain. 1889 */ 1890 map = sc->sc_tx_sparemap; 1891 d = c->xl_ptr; 1892 d->xl_status = htole32(0); 1893 d->xl_next = 0; 1894 1895 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 1896 m_head, BUS_DMA_NOWAIT) != 0) 1897 return (ENOBUFS); 1898 1899 for (frag = 0; frag < map->dm_nsegs; frag++) { 1900 if (frag == XL_MAXFRAGS) 1901 break; 1902 f = &d->xl_frag[frag]; 1903 f->xl_addr = htole32(map->dm_segs[frag].ds_addr); 1904 f->xl_len = htole32(map->dm_segs[frag].ds_len); 1905 } 1906 1907 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1908 BUS_DMASYNC_PREWRITE); 1909 1910 /* sync the old map, and unload it (if necessary) */ 1911 if (c->map->dm_nsegs != 0) { 1912 bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize, 1913 BUS_DMASYNC_POSTWRITE); 1914 bus_dmamap_unload(sc->sc_dmat, c->map); 1915 } 1916 1917 c->xl_mbuf = m_head; 1918 sc->sc_tx_sparemap = c->map; 1919 c->map = map; 1920 c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG); 1921 c->xl_ptr->xl_status = htole32(XL_TXSTAT_RND_DEFEAT); 1922 1923 if (m_head->m_pkthdr.csum & M_IPV4_CSUM_OUT) 1924 c->xl_ptr->xl_status |= htole32(XL_TXSTAT_IPCKSUM); 1925 if (m_head->m_pkthdr.csum & M_TCPV4_CSUM_OUT) 1926 c->xl_ptr->xl_status |= htole32(XL_TXSTAT_TCPCKSUM); 1927 if (m_head->m_pkthdr.csum & M_UDPV4_CSUM_OUT) 1928 c->xl_ptr->xl_status |= htole32(XL_TXSTAT_UDPCKSUM); 1929 1930 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1931 offsetof(struct xl_list_data, xl_tx_list[0]), 1932 sizeof(struct xl_list) * XL_TX_LIST_CNT, 1933 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1934 1935 return(0); 1936 } 1937 1938 void 1939 xl_start_90xB(ifp) 1940 struct ifnet *ifp; 1941 { 1942 struct xl_softc *sc; 1943 struct mbuf *m_head = NULL; 1944 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1945 int idx; 1946 1947 sc = ifp->if_softc; 1948 1949 if (ifp->if_flags & IFF_OACTIVE) 1950 return; 1951 1952 idx = sc->xl_cdata.xl_tx_prod; 1953 start_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1954 1955 while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) { 1956 1957 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { 1958 ifp->if_flags |= IFF_OACTIVE; 1959 break; 1960 } 1961 1962 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1963 if (m_head == NULL) 1964 break; 1965 1966 cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; 1967 1968 /* Pack the data into the descriptor. */ 1969 xl_encap_90xB(sc, cur_tx, m_head); 1970 1971 /* Chain it together. */ 1972 if (prev != NULL) 1973 prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); 1974 prev = cur_tx; 1975 1976 #if NBPFILTER > 0 1977 /* 1978 * If there's a BPF listener, bounce a copy of this frame 1979 * to him. 1980 */ 1981 if (ifp->if_bpf) 1982 bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf); 1983 #endif 1984 1985 XL_INC(idx, XL_TX_LIST_CNT); 1986 sc->xl_cdata.xl_tx_cnt++; 1987 } 1988 1989 /* 1990 * If there are no packets queued, bail. 1991 */ 1992 if (cur_tx == NULL) 1993 return; 1994 1995 /* 1996 * Place the request for the upload interrupt 1997 * in the last descriptor in the chain. This way, if 1998 * we're chaining several packets at once, we'll only 1999 * get an interupt once for the whole chain rather than 2000 * once for each packet. 2001 */ 2002 cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); 2003 2004 /* Start transmission */ 2005 sc->xl_cdata.xl_tx_prod = idx; 2006 start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); 2007 2008 /* 2009 * Set a timeout in case the chip goes out to lunch. 2010 */ 2011 ifp->if_timer = 5; 2012 } 2013 2014 void xl_init(xsc) 2015 void *xsc; 2016 { 2017 struct xl_softc *sc = xsc; 2018 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2019 int s, i; 2020 u_int16_t rxfilt = 0; 2021 struct mii_data *mii = NULL; 2022 2023 s = splimp(); 2024 2025 /* 2026 * Cancel pending I/O and free all RX/TX buffers. 2027 */ 2028 xl_stop(sc); 2029 2030 if (sc->xl_hasmii) 2031 mii = &sc->sc_mii; 2032 2033 if (mii == NULL) { 2034 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 2035 xl_wait(sc); 2036 } 2037 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 2038 xl_wait(sc); 2039 DELAY(10000); 2040 2041 2042 /* Init our MAC address */ 2043 XL_SEL_WIN(2); 2044 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2045 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, 2046 sc->sc_arpcom.ac_enaddr[i]); 2047 } 2048 2049 /* Clear the station mask. */ 2050 for (i = 0; i < 3; i++) 2051 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); 2052 #ifdef notdef 2053 /* Reset TX and RX. */ 2054 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 2055 xl_wait(sc); 2056 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 2057 xl_wait(sc); 2058 #endif 2059 /* Init circular RX list. */ 2060 if (xl_list_rx_init(sc) == ENOBUFS) { 2061 printf("%s: initialization failed: no " 2062 "memory for rx buffers\n", sc->sc_dev.dv_xname); 2063 xl_stop(sc); 2064 splx(s); 2065 return; 2066 } 2067 2068 /* Init TX descriptors. */ 2069 if (sc->xl_type == XL_TYPE_905B) 2070 xl_list_tx_init_90xB(sc); 2071 else 2072 xl_list_tx_init(sc); 2073 2074 /* 2075 * Set the TX freethresh value. 2076 * Note that this has no effect on 3c905B "cyclone" 2077 * cards but is required for 3c900/3c905 "boomerang" 2078 * cards in order to enable the download engine. 2079 */ 2080 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); 2081 2082 /* Set the TX start threshold for best performance. */ 2083 sc->xl_tx_thresh = XL_MIN_FRAMELEN; 2084 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); 2085 2086 /* 2087 * If this is a 3c905B, also set the tx reclaim threshold. 2088 * This helps cut down on the number of tx reclaim errors 2089 * that could happen on a busy network. The chip multiplies 2090 * the register value by 16 to obtain the actual threshold 2091 * in bytes, so we divide by 16 when setting the value here. 2092 * The existing threshold value can be examined by reading 2093 * the register at offset 9 in window 5. 2094 */ 2095 if (sc->xl_type == XL_TYPE_905B) { 2096 CSR_WRITE_2(sc, XL_COMMAND, 2097 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); 2098 } 2099 2100 /* Set RX filter bits. */ 2101 XL_SEL_WIN(5); 2102 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 2103 2104 /* Set the individual bit to receive frames for this host only. */ 2105 rxfilt |= XL_RXFILTER_INDIVIDUAL; 2106 2107 /* If we want promiscuous mode, set the allframes bit. */ 2108 if (ifp->if_flags & IFF_PROMISC) { 2109 rxfilt |= XL_RXFILTER_ALLFRAMES; 2110 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 2111 } else { 2112 rxfilt &= ~XL_RXFILTER_ALLFRAMES; 2113 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 2114 } 2115 2116 /* 2117 * Set capture broadcast bit to capture broadcast frames. 2118 */ 2119 if (ifp->if_flags & IFF_BROADCAST) { 2120 rxfilt |= XL_RXFILTER_BROADCAST; 2121 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 2122 } else { 2123 rxfilt &= ~XL_RXFILTER_BROADCAST; 2124 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); 2125 } 2126 2127 /* 2128 * Program the multicast filter, if necessary. 2129 */ 2130 #if 0 2131 if (sc->xl_type == XL_TYPE_905B) 2132 #else 2133 if (0) /* xl_setmulti_hash() does not work right */ 2134 #endif 2135 xl_setmulti_hash(sc); 2136 else 2137 xl_setmulti(sc); 2138 2139 /* 2140 * Load the address of the RX list. We have to 2141 * stall the upload engine before we can manipulate 2142 * the uplist pointer register, then unstall it when 2143 * we're finished. We also have to wait for the 2144 * stall command to complete before proceeding. 2145 * Note that we have to do this after any RX resets 2146 * have completed since the uplist register is cleared 2147 * by a reset. 2148 */ 2149 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); 2150 xl_wait(sc); 2151 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr + 2152 offsetof(struct xl_list_data, xl_rx_list[0])); 2153 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); 2154 xl_wait(sc); 2155 2156 if (sc->xl_type == XL_TYPE_905B) { 2157 /* Set polling interval */ 2158 CSR_WRITE_1(sc, XL_DOWN_POLL, 64); 2159 /* Load the address of the TX list */ 2160 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); 2161 xl_wait(sc); 2162 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, 2163 sc->sc_listmap->dm_segs[0].ds_addr + 2164 offsetof(struct xl_list_data, xl_tx_list[0])); 2165 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); 2166 xl_wait(sc); 2167 } 2168 2169 /* 2170 * If the coax transceiver is on, make sure to enable 2171 * the DC-DC converter. 2172 */ 2173 XL_SEL_WIN(3); 2174 if (sc->xl_xcvr == XL_XCVR_COAX) 2175 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); 2176 else 2177 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 2178 2179 #if NVLAN > 0 2180 /* Set max packet size to handle VLAN frames, only on 3c905B */ 2181 if (sc->xl_type == XL_TYPE_905B) 2182 CSR_WRITE_2(sc, XL_W3_MAX_PKT_SIZE, 1514 + 4); 2183 #endif 2184 2185 /* Clear out the stats counters. */ 2186 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); 2187 sc->xl_stats_no_timeout = 1; 2188 xl_stats_update(sc); 2189 sc->xl_stats_no_timeout = 0; 2190 XL_SEL_WIN(4); 2191 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); 2192 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); 2193 2194 /* 2195 * Enable interrupts. 2196 */ 2197 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); 2198 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); 2199 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); 2200 2201 if (sc->intr_ack) 2202 (*sc->intr_ack)(sc); 2203 2204 /* Set the RX early threshold */ 2205 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); 2206 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); 2207 2208 /* Enable receiver and transmitter. */ 2209 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); 2210 xl_wait(sc); 2211 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); 2212 xl_wait(sc); 2213 2214 /* Restore state of BMCR */ 2215 if (mii != NULL) 2216 mii_mediachg(mii); 2217 2218 /* Select window 7 for normal operations. */ 2219 XL_SEL_WIN(7); 2220 2221 ifp->if_flags |= IFF_RUNNING; 2222 ifp->if_flags &= ~IFF_OACTIVE; 2223 2224 splx(s); 2225 2226 timeout_add(&sc->xl_stsup_tmo, hz); 2227 2228 return; 2229 } 2230 2231 /* 2232 * Set media options. 2233 */ 2234 int xl_ifmedia_upd(ifp) 2235 struct ifnet *ifp; 2236 { 2237 struct xl_softc *sc; 2238 struct ifmedia *ifm = NULL; 2239 struct mii_data *mii = NULL; 2240 2241 sc = ifp->if_softc; 2242 2243 if (sc->xl_hasmii) 2244 mii = &sc->sc_mii; 2245 if (mii == NULL) 2246 ifm = &sc->ifmedia; 2247 else 2248 ifm = &mii->mii_media; 2249 2250 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2251 case IFM_100_FX: 2252 case IFM_10_FL: 2253 case IFM_10_2: 2254 case IFM_10_5: 2255 xl_setmode(sc, ifm->ifm_media); 2256 return (0); 2257 break; 2258 default: 2259 break; 2260 } 2261 2262 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX 2263 || sc->xl_media & XL_MEDIAOPT_BT4) { 2264 xl_init(sc); 2265 } else { 2266 xl_setmode(sc, ifm->ifm_media); 2267 } 2268 2269 return(0); 2270 } 2271 2272 /* 2273 * Report current media status. 2274 */ 2275 void xl_ifmedia_sts(ifp, ifmr) 2276 struct ifnet *ifp; 2277 struct ifmediareq *ifmr; 2278 { 2279 struct xl_softc *sc; 2280 u_int32_t icfg; 2281 struct mii_data *mii = NULL; 2282 2283 sc = ifp->if_softc; 2284 if (sc->xl_hasmii != 0) 2285 mii = &sc->sc_mii; 2286 2287 XL_SEL_WIN(3); 2288 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; 2289 icfg >>= XL_ICFG_CONNECTOR_BITS; 2290 2291 ifmr->ifm_active = IFM_ETHER; 2292 2293 switch(icfg) { 2294 case XL_XCVR_10BT: 2295 ifmr->ifm_active = IFM_ETHER|IFM_10_T; 2296 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) 2297 ifmr->ifm_active |= IFM_FDX; 2298 else 2299 ifmr->ifm_active |= IFM_HDX; 2300 break; 2301 case XL_XCVR_AUI: 2302 if (sc->xl_type == XL_TYPE_905B && 2303 sc->xl_media == XL_MEDIAOPT_10FL) { 2304 ifmr->ifm_active = IFM_ETHER|IFM_10_FL; 2305 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) 2306 ifmr->ifm_active |= IFM_FDX; 2307 else 2308 ifmr->ifm_active |= IFM_FDX; 2309 } else 2310 ifmr->ifm_active = IFM_ETHER|IFM_10_5; 2311 break; 2312 case XL_XCVR_COAX: 2313 ifmr->ifm_active = IFM_ETHER|IFM_10_2; 2314 break; 2315 /* 2316 * XXX MII and BTX/AUTO should be separate cases. 2317 */ 2318 2319 case XL_XCVR_100BTX: 2320 case XL_XCVR_AUTO: 2321 case XL_XCVR_MII: 2322 if (mii != NULL) { 2323 mii_pollstat(mii); 2324 ifmr->ifm_active = mii->mii_media_active; 2325 ifmr->ifm_status = mii->mii_media_status; 2326 } 2327 break; 2328 case XL_XCVR_100BFX: 2329 ifmr->ifm_active = IFM_ETHER|IFM_100_FX; 2330 break; 2331 default: 2332 printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg); 2333 break; 2334 } 2335 2336 return; 2337 } 2338 2339 int 2340 xl_ioctl(ifp, command, data) 2341 struct ifnet *ifp; 2342 u_long command; 2343 caddr_t data; 2344 { 2345 struct xl_softc *sc = ifp->if_softc; 2346 struct ifreq *ifr = (struct ifreq *)data; 2347 struct ifaddr *ifa = (struct ifaddr *)data; 2348 int s, error = 0; 2349 struct mii_data *mii = NULL; 2350 u_int8_t rxfilt; 2351 2352 s = splimp(); 2353 2354 if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) { 2355 splx(s); 2356 return error; 2357 } 2358 2359 switch(command) { 2360 case SIOCSIFADDR: 2361 ifp->if_flags |= IFF_UP; 2362 switch (ifa->ifa_addr->sa_family) { 2363 #ifdef INET 2364 case AF_INET: 2365 xl_init(sc); 2366 arp_ifinit(&sc->sc_arpcom, ifa); 2367 break; 2368 #endif /* INET */ 2369 default: 2370 xl_init(sc); 2371 break; 2372 } 2373 break; 2374 2375 case SIOCSIFMTU: 2376 if(ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { 2377 error = EINVAL; 2378 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2379 ifp->if_mtu = ifr->ifr_mtu; 2380 } 2381 break; 2382 2383 case SIOCSIFFLAGS: 2384 XL_SEL_WIN(5); 2385 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); 2386 if (ifp->if_flags & IFF_UP) { 2387 if (ifp->if_flags & IFF_RUNNING && 2388 ifp->if_flags & IFF_PROMISC && 2389 !(sc->xl_if_flags & IFF_PROMISC)) { 2390 rxfilt |= XL_RXFILTER_ALLFRAMES; 2391 CSR_WRITE_2(sc, XL_COMMAND, 2392 XL_CMD_RX_SET_FILT|rxfilt); 2393 XL_SEL_WIN(7); 2394 } else if (ifp->if_flags & IFF_RUNNING && 2395 !(ifp->if_flags & IFF_PROMISC) && 2396 sc->xl_if_flags & IFF_PROMISC) { 2397 rxfilt &= ~XL_RXFILTER_ALLFRAMES; 2398 CSR_WRITE_2(sc, XL_COMMAND, 2399 XL_CMD_RX_SET_FILT|rxfilt); 2400 XL_SEL_WIN(7); 2401 } else 2402 xl_init(sc); 2403 } else { 2404 if (ifp->if_flags & IFF_RUNNING) 2405 xl_stop(sc); 2406 } 2407 sc->xl_if_flags = ifp->if_flags; 2408 error = 0; 2409 break; 2410 case SIOCADDMULTI: 2411 case SIOCDELMULTI: 2412 error = (command == SIOCADDMULTI) ? 2413 ether_addmulti(ifr, &sc->sc_arpcom) : 2414 ether_delmulti(ifr, &sc->sc_arpcom); 2415 2416 if (error == ENETRESET) { 2417 /* 2418 * Multicast list has changed; set the hardware 2419 * filter accordingly. 2420 */ 2421 #if 0 2422 if (sc->xl_type == XL_TYPE_905B) 2423 #else 2424 if (0) /* xl_setmulti_hash() does not work right */ 2425 #endif 2426 xl_setmulti_hash(sc); 2427 else 2428 xl_setmulti(sc); 2429 error = 0; 2430 } 2431 break; 2432 case SIOCGIFMEDIA: 2433 case SIOCSIFMEDIA: 2434 if (sc->xl_hasmii != 0) 2435 mii = &sc->sc_mii; 2436 if (mii == NULL) 2437 error = ifmedia_ioctl(ifp, ifr, 2438 &sc->ifmedia, command); 2439 else 2440 error = ifmedia_ioctl(ifp, ifr, 2441 &mii->mii_media, command); 2442 break; 2443 default: 2444 error = EINVAL; 2445 break; 2446 } 2447 2448 splx(s); 2449 2450 return(error); 2451 } 2452 2453 void xl_watchdog(ifp) 2454 struct ifnet *ifp; 2455 { 2456 struct xl_softc *sc; 2457 u_int16_t status = 0; 2458 2459 sc = ifp->if_softc; 2460 2461 ifp->if_oerrors++; 2462 XL_SEL_WIN(4); 2463 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); 2464 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2465 2466 if (status & XL_MEDIASTAT_CARRIER) 2467 printf("%s: no carrier - transceiver cable problem?\n", 2468 sc->sc_dev.dv_xname); 2469 xl_txeoc(sc); 2470 xl_txeof(sc); 2471 xl_rxeof(sc); 2472 xl_reset(sc, 0); 2473 xl_init(sc); 2474 2475 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2476 (*ifp->if_start)(ifp); 2477 2478 return; 2479 } 2480 2481 void 2482 xl_freetxrx(sc) 2483 struct xl_softc *sc; 2484 { 2485 int i; 2486 2487 /* 2488 * Free data in the RX lists. 2489 */ 2490 for (i = 0; i < XL_RX_LIST_CNT; i++) { 2491 if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) { 2492 bus_dmamap_t map = sc->xl_cdata.xl_rx_chain[i].map; 2493 2494 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2495 BUS_DMASYNC_POSTREAD); 2496 bus_dmamap_unload(sc->sc_dmat, map); 2497 } 2498 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { 2499 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); 2500 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; 2501 } 2502 } 2503 bzero((char *)&sc->xl_ldata->xl_rx_list, 2504 sizeof(sc->xl_ldata->xl_rx_list)); 2505 /* 2506 * Free the TX list buffers. 2507 */ 2508 for (i = 0; i < XL_TX_LIST_CNT; i++) { 2509 if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) { 2510 bus_dmamap_t map = sc->xl_cdata.xl_tx_chain[i].map; 2511 2512 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2513 BUS_DMASYNC_POSTWRITE); 2514 bus_dmamap_unload(sc->sc_dmat, map); 2515 } 2516 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { 2517 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); 2518 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; 2519 } 2520 } 2521 bzero((char *)&sc->xl_ldata->xl_tx_list, 2522 sizeof(sc->xl_ldata->xl_tx_list)); 2523 } 2524 2525 /* 2526 * Stop the adapter and free any mbufs allocated to the 2527 * RX and TX lists. 2528 */ 2529 void xl_stop(sc) 2530 struct xl_softc *sc; 2531 { 2532 struct ifnet *ifp; 2533 2534 ifp = &sc->sc_arpcom.ac_if; 2535 ifp->if_timer = 0; 2536 2537 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); 2538 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); 2539 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); 2540 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); 2541 xl_wait(sc); 2542 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); 2543 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); 2544 DELAY(800); 2545 2546 #ifdef foo 2547 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); 2548 xl_wait(sc); 2549 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); 2550 xl_wait(sc); 2551 #endif 2552 2553 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); 2554 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); 2555 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); 2556 2557 if (sc->intr_ack) 2558 (*sc->intr_ack)(sc); 2559 2560 /* Stop the stats updater. */ 2561 timeout_del(&sc->xl_stsup_tmo); 2562 2563 xl_freetxrx(sc); 2564 2565 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2566 2567 return; 2568 } 2569 2570 void 2571 xl_attach(sc) 2572 struct xl_softc *sc; 2573 { 2574 u_int8_t enaddr[ETHER_ADDR_LEN]; 2575 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2576 int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX; 2577 struct ifmedia *ifm; 2578 2579 i = splimp(); 2580 xl_reset(sc, 1); 2581 splx(i); 2582 2583 /* 2584 * Get station address from the EEPROM. 2585 */ 2586 if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) { 2587 printf("\n%s: failed to read station address\n", 2588 sc->sc_dev.dv_xname); 2589 return; 2590 } 2591 bcopy(enaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 2592 2593 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data), 2594 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 2595 BUS_DMA_NOWAIT) != 0) { 2596 printf(": can't alloc list mem\n"); 2597 return; 2598 } 2599 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 2600 sizeof(struct xl_list_data), &sc->sc_listkva, 2601 BUS_DMA_NOWAIT) != 0) { 2602 printf(": can't map list mem\n"); 2603 return; 2604 } 2605 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1, 2606 sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT, 2607 &sc->sc_listmap) != 0) { 2608 printf(": can't alloc list map\n"); 2609 return; 2610 } 2611 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 2612 sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 2613 printf(": can't load list map\n"); 2614 return; 2615 } 2616 sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva; 2617 bzero(sc->xl_ldata, sizeof(struct xl_list_data)); 2618 2619 for (i = 0; i < XL_RX_LIST_CNT; i++) { 2620 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 2621 0, BUS_DMA_NOWAIT, 2622 &sc->xl_cdata.xl_rx_chain[i].map) != 0) { 2623 printf(": can't create rx map\n"); 2624 return; 2625 } 2626 } 2627 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 2628 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 2629 printf(": can't create rx spare map\n"); 2630 return; 2631 } 2632 2633 for (i = 0; i < XL_TX_LIST_CNT; i++) { 2634 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 2635 XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT, 2636 &sc->xl_cdata.xl_tx_chain[i].map) != 0) { 2637 printf(": can't create tx map\n"); 2638 return; 2639 } 2640 } 2641 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3, 2642 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 2643 printf(": can't create tx spare map\n"); 2644 return; 2645 } 2646 2647 printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 2648 2649 if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) { 2650 u_int16_t n; 2651 2652 XL_SEL_WIN(2); 2653 n = CSR_READ_2(sc, 12); 2654 2655 if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR) 2656 n |= 0x0010; 2657 2658 if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) 2659 n |= 0x4000; 2660 2661 CSR_WRITE_2(sc, 12, n); 2662 } 2663 2664 /* 2665 * Figure out the card type. 3c905B adapters have the 2666 * 'supportsNoTxLength' bit set in the capabilities 2667 * word in the EEPROM. 2668 */ 2669 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); 2670 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH) 2671 sc->xl_type = XL_TYPE_905B; 2672 else 2673 sc->xl_type = XL_TYPE_90X; 2674 2675 timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc); 2676 2677 ifp->if_softc = sc; 2678 ifp->if_mtu = ETHERMTU; 2679 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2680 ifp->if_ioctl = xl_ioctl; 2681 ifp->if_output = ether_output; 2682 if (sc->xl_type == XL_TYPE_905B) { 2683 ifp->if_start = xl_start_90xB; 2684 ifp->if_capabilities = IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4| 2685 IFCAP_CSUM_UDPv4; 2686 } else 2687 ifp->if_start = xl_start; 2688 ifp->if_watchdog = xl_watchdog; 2689 ifp->if_baudrate = 10000000; 2690 IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); 2691 IFQ_SET_READY(&ifp->if_snd); 2692 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 2693 2694 #if NVLAN > 0 2695 if (sc->xl_type == XL_TYPE_905B) 2696 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2697 /* 2698 * XXX 2699 * Do other cards filter large packets or simply pass them through? 2700 * Apparently only the 905B has the capability to set a larger size. 2701 */ 2702 #endif 2703 2704 XL_SEL_WIN(3); 2705 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); 2706 2707 xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0); 2708 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; 2709 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; 2710 2711 DELAY(100000); 2712 2713 xl_mediacheck(sc); 2714 2715 if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { 2716 XL_SEL_WIN(2); 2717 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, XL_RESETOPT_INVMIIPWR | 2718 CSR_READ_2(sc, XL_W2_RESET_OPTIONS)); 2719 } 2720 2721 DELAY(100000); 2722 2723 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX 2724 || sc->xl_media & XL_MEDIAOPT_BT4) { 2725 ifmedia_init(&sc->sc_mii.mii_media, 0, 2726 xl_ifmedia_upd, xl_ifmedia_sts); 2727 sc->xl_hasmii = 1; 2728 sc->sc_mii.mii_ifp = ifp; 2729 sc->sc_mii.mii_readreg = xl_miibus_readreg; 2730 sc->sc_mii.mii_writereg = xl_miibus_writereg; 2731 sc->sc_mii.mii_statchg = xl_miibus_statchg; 2732 xl_setcfg(sc); 2733 mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff, 2734 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2735 2736 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2737 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 2738 0, NULL); 2739 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 2740 } 2741 else { 2742 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2743 } 2744 ifm = &sc->sc_mii.mii_media; 2745 } 2746 else { 2747 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); 2748 sc->xl_hasmii = 0; 2749 ifm = &sc->ifmedia; 2750 } 2751 2752 /* 2753 * Sanity check. If the user has selected "auto" and this isn't 2754 * a 10/100 card of some kind, we need to force the transceiver 2755 * type to something sane. 2756 */ 2757 if (sc->xl_xcvr == XL_XCVR_AUTO) { 2758 xl_choose_xcvr(sc, 0); 2759 i = splimp(); 2760 xl_reset(sc, 0); 2761 splx(i); 2762 } 2763 2764 if (sc->xl_media & XL_MEDIAOPT_BT) { 2765 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL); 2766 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 2767 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2768 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 2769 } 2770 2771 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { 2772 /* 2773 * Check for a 10baseFL board in disguise. 2774 */ 2775 if (sc->xl_type == XL_TYPE_905B && 2776 sc->xl_media == XL_MEDIAOPT_10FL) { 2777 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL); 2778 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 2779 0, NULL); 2780 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2781 ifmedia_add(ifm, 2782 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); 2783 } else { 2784 ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL); 2785 } 2786 } 2787 2788 if (sc->xl_media & XL_MEDIAOPT_BNC) { 2789 ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL); 2790 } 2791 2792 if (sc->xl_media & XL_MEDIAOPT_BFX) { 2793 ifp->if_baudrate = 100000000; 2794 ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL); 2795 } 2796 2797 /* Choose a default media. */ 2798 switch(sc->xl_xcvr) { 2799 case XL_XCVR_10BT: 2800 media = IFM_ETHER|IFM_10_T; 2801 xl_setmode(sc, media); 2802 break; 2803 case XL_XCVR_AUI: 2804 if (sc->xl_type == XL_TYPE_905B && 2805 sc->xl_media == XL_MEDIAOPT_10FL) { 2806 media = IFM_ETHER|IFM_10_FL; 2807 xl_setmode(sc, media); 2808 } else { 2809 media = IFM_ETHER|IFM_10_5; 2810 xl_setmode(sc, media); 2811 } 2812 break; 2813 case XL_XCVR_COAX: 2814 media = IFM_ETHER|IFM_10_2; 2815 xl_setmode(sc, media); 2816 break; 2817 case XL_XCVR_AUTO: 2818 case XL_XCVR_100BTX: 2819 case XL_XCVR_MII: 2820 /* Chosen by miibus */ 2821 break; 2822 case XL_XCVR_100BFX: 2823 media = IFM_ETHER|IFM_100_FX; 2824 xl_setmode(sc, media); 2825 break; 2826 default: 2827 printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, 2828 sc->xl_xcvr); 2829 /* 2830 * This will probably be wrong, but it prevents 2831 * the ifmedia code from panicking. 2832 */ 2833 media = IFM_ETHER | IFM_10_T; 2834 break; 2835 } 2836 2837 if (sc->xl_hasmii == 0) 2838 ifmedia_set(&sc->ifmedia, media); 2839 2840 /* 2841 * Call MI attach routines. 2842 */ 2843 if_attach(ifp); 2844 ether_ifattach(ifp); 2845 2846 sc->sc_sdhook = shutdownhook_establish(xl_shutdown, sc); 2847 sc->sc_pwrhook = powerhook_establish(xl_power, sc); 2848 } 2849 2850 int 2851 xl_detach(sc) 2852 struct xl_softc *sc; 2853 { 2854 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2855 2856 /* Unhook our tick handler. */ 2857 timeout_del(&sc->xl_stsup_tmo); 2858 2859 xl_freetxrx(sc); 2860 2861 /* Detach all PHYs */ 2862 if (sc->xl_hasmii) 2863 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2864 2865 /* Delete all remaining media. */ 2866 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2867 2868 ether_ifdetach(ifp); 2869 if_detach(ifp); 2870 2871 shutdownhook_disestablish(sc->sc_sdhook); 2872 powerhook_disestablish(sc->sc_pwrhook); 2873 2874 return (0); 2875 } 2876 2877 void 2878 xl_shutdown(v) 2879 void *v; 2880 { 2881 struct xl_softc *sc = (struct xl_softc *)v; 2882 2883 xl_reset(sc, 1); 2884 xl_stop(sc); 2885 } 2886 2887 struct cfdriver xl_cd = { 2888 0, "xl", DV_IFNET 2889 }; 2890