1 /* $OpenBSD: xl.c,v 1.136 2020/12/12 11:48:52 jan Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $ 35 */ 36 37 /* 38 * 3Com 3c90x Etherlink XL PCI NIC driver 39 * 40 * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI 41 * bus-master chips (3c90x cards and embedded controllers) including 42 * the following: 43 * 44 * 3Com 3c900-TPO 10Mbps/RJ-45 45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC 46 * 3Com 3c905-TX 10/100Mbps/RJ-45 47 * 3Com 3c905-T4 10/100Mbps/RJ-45 48 * 3Com 3c900B-TPO 10Mbps/RJ-45 49 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC 50 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC 51 * 3Com 3c900B-FL 10Mbps/Fiber-optic 52 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC 53 * 3Com 3c905B-TX 10/100Mbps/RJ-45 54 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic 55 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) 56 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) 57 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) 58 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) 59 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) 60 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) 61 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 62 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) 63 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 64 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 65 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 66 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 67 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) 68 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) 69 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 70 * Dell on-board 3c920 10/100Mbps/RJ-45 71 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 72 * Dell Latitude laptop docking station embedded 3c905-TX 73 * 74 * Written by Bill Paul <wpaul@ctr.columbia.edu> 75 * Electrical Engineering Department 76 * Columbia University, New York City 77 */ 78 79 /* 80 * The 3c90x series chips use a bus-master DMA interface for transferring 81 
* packets to and from the controller chip. Some of the "vortex" cards 82 * (3c59x) also supported a bus master mode, however for those chips 83 * you could only DMA packets to/from a contiguous memory buffer. For 84 * transmission this would mean copying the contents of the queued mbuf 85 * chain into an mbuf cluster and then DMAing the cluster. This extra 86 * copy would sort of defeat the purpose of the bus master support for 87 * any packet that doesn't fit into a single mbuf. 88 * 89 * By contrast, the 3c90x cards support a fragment-based bus master 90 * mode where mbuf chains can be encapsulated using TX descriptors. 91 * This is similar to other PCI chips such as the Texas Instruments 92 * ThunderLAN and the Intel 82557/82558. 93 * 94 * The "vortex" driver (if_vx.c) happens to work for the "boomerang" 95 * bus master chips because they maintain the old PIO interface for 96 * backwards compatibility, but starting with the 3c905B and the 97 * "cyclone" chips, the compatibility interface has been dropped. 98 * Since using bus master DMA is a big win, we use this driver to 99 * support the PCI "boomerang" chips even though they work with the 100 * "vortex" driver in order to obtain better performance. 
101 */ 102 103 #include "bpfilter.h" 104 105 #include <sys/param.h> 106 #include <sys/systm.h> 107 #include <sys/mbuf.h> 108 #include <sys/protosw.h> 109 #include <sys/socket.h> 110 #include <sys/ioctl.h> 111 #include <sys/errno.h> 112 #include <sys/malloc.h> 113 #include <sys/kernel.h> 114 #include <sys/device.h> 115 116 #include <net/if.h> 117 #include <net/if_media.h> 118 119 #include <netinet/in.h> 120 #include <netinet/if_ether.h> 121 122 #include <dev/mii/miivar.h> 123 124 #include <machine/bus.h> 125 126 #if NBPFILTER > 0 127 #include <net/bpf.h> 128 #endif 129 130 #include <dev/ic/xlreg.h> 131 132 /* 133 * TX Checksumming is disabled by default for two reasons: 134 * - TX Checksumming will occasionally produce corrupt packets 135 * - TX Checksumming seems to reduce performance 136 * 137 * Only 905B/C cards were reported to have this problem, it is possible 138 * that later chips _may_ be immune. 139 */ 140 #define XL905B_TXCSUM_BROKEN 1 141 142 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); 143 void xl_stats_update(void *); 144 int xl_encap(struct xl_softc *, struct xl_chain *, 145 struct mbuf * ); 146 void xl_rxeof(struct xl_softc *); 147 void xl_txeof(struct xl_softc *); 148 void xl_txeof_90xB(struct xl_softc *); 149 void xl_txeoc(struct xl_softc *); 150 int xl_intr(void *); 151 void xl_start(struct ifnet *); 152 void xl_start_90xB(struct ifnet *); 153 int xl_ioctl(struct ifnet *, u_long, caddr_t); 154 void xl_freetxrx(struct xl_softc *); 155 void xl_watchdog(struct ifnet *); 156 int xl_ifmedia_upd(struct ifnet *); 157 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 158 159 int xl_eeprom_wait(struct xl_softc *); 160 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); 161 void xl_mii_sync(struct xl_softc *); 162 void xl_mii_send(struct xl_softc *, u_int32_t, int); 163 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *); 164 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *); 165 166 void 
xl_setcfg(struct xl_softc *);
void	xl_setmode(struct xl_softc *, uint64_t);
void	xl_iff(struct xl_softc *);
void	xl_iff_90x(struct xl_softc *);
void	xl_iff_905b(struct xl_softc *);
int	xl_list_rx_init(struct xl_softc *);
void	xl_fill_rx_ring(struct xl_softc *);
int	xl_list_tx_init(struct xl_softc *);
int	xl_list_tx_init_90xB(struct xl_softc *);
void	xl_wait(struct xl_softc *);
void	xl_mediacheck(struct xl_softc *);
void	xl_choose_xcvr(struct xl_softc *, int);

int	xl_miibus_readreg(struct device *, int, int);
void	xl_miibus_writereg(struct device *, int, int, int);
void	xl_miibus_statchg(struct device *);
#ifndef SMALL_KERNEL
int	xl_wol(struct ifnet *, int);
void	xl_wol_power(struct xl_softc *);
#endif

/*
 * Autoconf activation hook: stop the interface across suspend and
 * re-initialize it on resume if it was configured up; on powerdown,
 * set up the wake-on-LAN power state after the children are handled.
 */
int
xl_activate(struct device *self, int act)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			xl_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		/* Only bring the chip back up if it was configured up. */
		if (ifp->if_flags & IFF_UP)
			xl_init(sc);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		xl_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Murphy's law says that it's possible the chip can wedge and
 * the 'command in progress' bit may never clear. Hence, we wait
 * only a finite amount of time to avoid getting caught in an
 * infinite loop. Normally this delay routine would be a macro,
 * but it isn't called during normal operation so we can afford
 * to make it a function.
 */
void
xl_wait(struct xl_softc *sc)
{
	int i;

	/* Poll until the chip deasserts its command-busy status bit. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
}

/*
 * MII access routines are provided for adapters with external
 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
	    CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
	    CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
xl_mii_sync(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(4);
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * NOTE(review): the repeated DATA writes look like they
		 * are there for MDIO timing (each MII_SET is a register
		 * read-modify-write) -- confirm before simplifying.
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}

/*
 * Clock a series of bits through the MII.
 */
void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* Shift the bits out most-significant first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4.
	 */

	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, most-significant first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	/* Non-zero return means the PHY never acked the address phase. */
	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int s;

	s = splnet();

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select the window 4.
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Shift out delimiter, opcode, address, turnaround and data. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return (0);
}

/*
 * MII bus glue: read a PHY register on behalf of the mii(4) layer.
 */
int
xl_miibus_readreg(struct device *self, int phy, int reg)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct xl_mii_frame frame;

	/* Unless the chip is flagged PHYOK, only PHY address 24 is used. */
	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
		return (0);

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	xl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

/*
 * MII bus glue: write a PHY register on behalf of the mii(4) layer.
 */
void
xl_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct xl_mii_frame frame;

	/* Unless the chip is flagged PHYOK, only PHY address 24 is used. */
	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
		return;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	xl_mii_writereg(sc, &frame);
}

/*
 * MII bus glue: link parameters changed; reprogram the connector
 * configuration and mirror the negotiated duplex into the MAC.
 */
void
xl_miibus_statchg(struct device *self)
{
	struct xl_softc *sc = (struct xl_softc *)self;

	xl_setcfg(sc);

	/* Set ASIC's duplex mode to match the PHY. */
	XL_SEL_WIN(3);
	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	else
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.
490 */ 491 int 492 xl_eeprom_wait(struct xl_softc *sc) 493 { 494 int i; 495 496 for (i = 0; i < 100; i++) { 497 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) 498 DELAY(162); 499 else 500 break; 501 } 502 503 if (i == 100) { 504 printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname); 505 return (1); 506 } 507 508 return (0); 509 } 510 511 /* 512 * Read a sequence of words from the EEPROM. Note that ethernet address 513 * data is stored in the EEPROM in network byte order. 514 */ 515 int 516 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) 517 { 518 int err = 0, i; 519 u_int16_t word = 0, *ptr; 520 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) 521 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) 522 /* WARNING! DANGER! 523 * It's easy to accidentally overwrite the rom content! 524 * Note: the 3c575 uses 8bit EEPROM offsets. 525 */ 526 XL_SEL_WIN(0); 527 528 if (xl_eeprom_wait(sc)) 529 return (1); 530 531 if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) 532 off += 0x30; 533 534 for (i = 0; i < cnt; i++) { 535 if (sc->xl_flags & XL_FLAG_8BITROM) 536 CSR_WRITE_2(sc, XL_W0_EE_CMD, 537 XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); 538 else 539 CSR_WRITE_2(sc, XL_W0_EE_CMD, 540 XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); 541 err = xl_eeprom_wait(sc); 542 if (err) 543 break; 544 word = CSR_READ_2(sc, XL_W0_EE_DATA); 545 ptr = (u_int16_t *)(dest + (i * 2)); 546 if (swap) 547 *ptr = ntohs(word); 548 else 549 *ptr = word; 550 } 551 552 return (err ? 1 : 0); 553 } 554 555 void 556 xl_iff(struct xl_softc *sc) 557 { 558 if (sc->xl_type == XL_TYPE_905B) 559 xl_iff_905b(sc); 560 else 561 xl_iff_90x(sc); 562 } 563 564 /* 565 * NICs older than the 3c905B have only one multicast option, which 566 * is to enable reception of all multicast frames. 
 */
void
xl_iff_90x(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	u_int8_t rxfilt;

	XL_SEL_WIN(5);

	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/* No hash filter here: any multicast membership means all-multi. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}

/*
 * 3c905B adapters have a hash filter that we can program.
 */
void
xl_iff_905b(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	int h = 0, i;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;

	XL_SEL_WIN(5);

	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/* Address ranges can't go into the hash; fall back to all-multi. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		rxfilt |= XL_RXFILTER_MULTIHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash is the low byte of the big-endian CRC32. */
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0x000000FF;
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
			    XL_HASH_SET | h);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}

/*
 * Program the internal-config connector field from the media options:
 * MII/BT4 media select the MII connector, BTX selects autoselect.
 */
void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
	    sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}

/*
 * Select the transceiver and media-status bits for a fixed (non-MII)
 * media choice, then program duplex and the coax transceiver to match.
 */
void
xl_setmode(struct xl_softc *sc, uint64_t media)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t icfg;
	u_int16_t mediastat;

	XL_SEL_WIN(4);
	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (IFM_SUBTYPE(media) == IFM_10_T) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_10BT;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
			ifp->if_baudrate = IF_Mbps(100);
			sc->xl_xcvr = XL_XCVR_100BFX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		if (IFM_SUBTYPE(media) == IFM_10_5) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD);
			/*
			 * NOTE(review): "|= ~XL_MEDIASTAT_SQEENB" ORs in the
			 * complement, setting every bit EXCEPT SQEENB;
			 * "|= XL_MEDIASTAT_SQEENB" (enable SQE test on AUI)
			 * looks intended.  Same pattern in the IFM_10_FL
			 * case below.  Left unchanged pending verification
			 * against the 3c90x media status register spec.
			 */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD);
			/* NOTE(review): see the SQEENB note above. */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			ifp->if_baudrate = IF_Mbps(10);
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
			    XL_MEDIASTAT_JABGUARD|
			    XL_MEDIASTAT_SQEENB);
		}
	}

	/* Full duplex is forced for 100baseFX as well as explicit FDX. */
	if ((media & IFM_GMASK) == IFM_FDX ||
	    IFM_SUBTYPE(media) == IFM_100_FX) {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	/* The coax transceiver is started for 10base2 only. */
	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
	DELAY(800);
	XL_SEL_WIN(7);
}

/*
 * Hard-reset the chip, then the RX and TX engines, honoring the
 * per-board reset-option quirks.
 */
void
xl_reset(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers. With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Some boards need the LED and/or MII power polarity inverted. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}

/*
 * This routine is a kludge to work around possible hardware faults
 * or manufacturing defects that can cause the media options register
 * (or reset options register, as it's called for the first generation
 * 3c90x adapters) to return an incorrect result.
I have encountered
 * one Dell Latitude laptop docking station with an integrated 3c905-TX
 * which doesn't have any of the 'mediaopt' bits set. This screws up
 * the attach routine pretty badly because it doesn't know what media
 * to look for. If we find ourselves in this predicament, this routine
 * will try to guess the media options values and warn the user of a
 * possible manufacturing defect with his adapter/system/whatever.
 */
void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
		 * Check the XCVR value. If it's not in the normal range
		 * of values, we need to fake it up here.
		 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			printf("%s: bogus xcvr value "
			    "in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
			printf("%s: choosing new default based "
			    "on card type\n", sc->sc_dev.dv_xname);
		}
	} else {
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		printf("%s: WARNING: no media options bits set in "
		    "the media options register!!\n", sc->sc_dev.dv_xname);
		printf("%s: this could be a manufacturing defect in "
		    "your adapter or system\n", sc->sc_dev.dv_xname);
		printf("%s: attempting to guess media type; you "
		    "should probably consult your vendor\n", sc->sc_dev.dv_xname);
	}

	xl_choose_xcvr(sc, 1);
}

/*
 * Guess the media options and default transceiver purely from the
 * device ID stored in the EEPROM.
 */
void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		printf("%s: unknown device ID: %x -- "
		    "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}

/*
 * Initialize the transmit descriptors.
 */
int
xl_list_tx_init(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;
	/* Link the software TX chain into a NULL-terminated list. */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = NULL;
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
	}

	cd->xl_tx_free = &cd->xl_tx_chain[0];
	cd->xl_tx_tail = cd->xl_tx_head = NULL;

	return (0);
}

/*
 * Initialize the transmit descriptors.
986 */ 987 int 988 xl_list_tx_init_90xB(struct xl_softc *sc) 989 { 990 struct xl_chain_data *cd; 991 struct xl_list_data *ld; 992 int i, next, prev; 993 994 cd = &sc->xl_cdata; 995 ld = sc->xl_ldata; 996 for (i = 0; i < XL_TX_LIST_CNT; i++) { 997 if (i == (XL_TX_LIST_CNT - 1)) 998 next = 0; 999 else 1000 next = i + 1; 1001 if (i == 0) 1002 prev = XL_TX_LIST_CNT - 1; 1003 else 1004 prev = i - 1; 1005 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; 1006 cd->xl_tx_chain[i].xl_phys = 1007 sc->sc_listmap->dm_segs[0].ds_addr + 1008 offsetof(struct xl_list_data, xl_tx_list[i]); 1009 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next]; 1010 cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev]; 1011 } 1012 1013 bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT); 1014 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); 1015 1016 cd->xl_tx_prod = 1; 1017 cd->xl_tx_cons = 1; 1018 cd->xl_tx_cnt = 0; 1019 1020 return (0); 1021 } 1022 1023 /* 1024 * Initialize the RX descriptors and allocate mbufs for them. Note that 1025 * we arrange the descriptors in a closed ring, so that the last descriptor 1026 * points back to the first. 
 */
int
xl_list_rx_init(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i, n;
	bus_addr_t next;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;

	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		cd->xl_rx_chain[i].xl_ptr =
		    (struct xl_list_onefrag *)&ld->xl_rx_list[i];
		if (i == (XL_RX_LIST_CNT - 1))
			n = 0;
		else
			n = i + 1;
		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
		/* Chain the descriptors by bus address for the chip. */
		next = sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_rx_list[n]);
		ld->xl_rx_list[i].xl_next = htole32(next);
	}

	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
	if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1);
	xl_fill_rx_ring(sc);
	return (0);
}

/*
 * Fill the RX ring with fresh mbuf clusters, stopping early if an
 * allocation fails; unused slots are handed back to rxr accounting.
 */
void
xl_fill_rx_ring(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	u_int slots;

	cd = &sc->xl_cdata;

	for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT);
	    slots > 0; slots--) {
		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
			break;
		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
	}
	if_rxr_put(&cd->xl_rx_ring, slots);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;

	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m_new)
		return (ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Load the new cluster into the spare map first, so failure here
	 * leaves the descriptor's old mbuf/map untouched. */
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* sync the old map, and unload it (if necessary) */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Swap the freshly-loaded spare map into the descriptor; the old
	 * (now unloaded) map becomes the new spare. */
	map = c->map;
	c->map = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = map;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	c->xl_mbuf = m_new;
	/* DMA address is offset by ETHER_ALIGN to match the m_adj above. */
	c->xl_ptr->xl_frag.xl_addr =
	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
	c->xl_ptr->xl_frag.xl_len =
	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
	/* Zero status hands the descriptor back to the chip. */
	c->xl_ptr->xl_status = htole32(0);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
1129 */ 1130 void 1131 xl_rxeof(struct xl_softc *sc) 1132 { 1133 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1134 struct mbuf *m; 1135 struct ifnet *ifp; 1136 struct xl_chain_onefrag *cur_rx; 1137 int total_len = 0; 1138 u_int32_t rxstat; 1139 u_int16_t sumflags = 0; 1140 1141 ifp = &sc->sc_arpcom.ac_if; 1142 1143 again: 1144 1145 while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) { 1146 cur_rx = sc->xl_cdata.xl_rx_cons; 1147 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1148 ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva), 1149 sizeof(struct xl_list), 1150 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1151 if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0) 1152 break; 1153 m = cur_rx->xl_mbuf; 1154 cur_rx->xl_mbuf = NULL; 1155 sc->xl_cdata.xl_rx_cons = cur_rx->xl_next; 1156 if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1); 1157 total_len = rxstat & XL_RXSTAT_LENMASK; 1158 1159 /* 1160 * Since we have told the chip to allow large frames, 1161 * we need to trap giant frame errors in software. We allow 1162 * a little more than the normal frame size to account for 1163 * frames with VLAN tags. 1164 */ 1165 if (total_len > XL_MAX_FRAMELEN) 1166 rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); 1167 1168 /* 1169 * If an error occurs, update stats, clear the 1170 * status word and leave the mbuf cluster in place: 1171 * it should simply get re-used next time this descriptor 1172 * comes up in the ring. 1173 */ 1174 if (rxstat & XL_RXSTAT_UP_ERROR) { 1175 ifp->if_ierrors++; 1176 cur_rx->xl_ptr->xl_status = htole32(0); 1177 m_freem(m); 1178 continue; 1179 } 1180 1181 /* 1182 * If the error bit was not set, the upload complete 1183 * bit should be set which means we have a valid packet. 1184 * If not, something truly strange has happened. 
1185 */ 1186 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { 1187 printf("%s: bad receive status -- " 1188 "packet dropped\n", sc->sc_dev.dv_xname); 1189 ifp->if_ierrors++; 1190 cur_rx->xl_ptr->xl_status = htole32(0); 1191 m_freem(m); 1192 continue; 1193 } 1194 1195 m->m_pkthdr.len = m->m_len = total_len; 1196 1197 if (sc->xl_type == XL_TYPE_905B) { 1198 if (!(rxstat & XL_RXSTAT_IPCKERR) && 1199 (rxstat & XL_RXSTAT_IPCKOK)) 1200 sumflags |= M_IPV4_CSUM_IN_OK; 1201 1202 if (!(rxstat & XL_RXSTAT_TCPCKERR) && 1203 (rxstat & XL_RXSTAT_TCPCKOK)) 1204 sumflags |= M_TCP_CSUM_IN_OK; 1205 1206 if (!(rxstat & XL_RXSTAT_UDPCKERR) && 1207 (rxstat & XL_RXSTAT_UDPCKOK)) 1208 sumflags |= M_UDP_CSUM_IN_OK; 1209 1210 m->m_pkthdr.csum_flags = sumflags; 1211 } 1212 1213 ml_enqueue(&ml, m); 1214 } 1215 1216 if (ifiq_input(&ifp->if_rcv, &ml)) 1217 if_rxr_livelocked(&sc->xl_cdata.xl_rx_ring); 1218 1219 xl_fill_rx_ring(sc); 1220 1221 /* 1222 * Handle the 'end of channel' condition. When the upload 1223 * engine hits the end of the RX ring, it will stall. This 1224 * is our cue to flush the RX ring, reload the uplist pointer 1225 * register and unstall the engine. 1226 * XXX This is actually a little goofy. With the ThunderLAN 1227 * chip, you get an interrupt when the receiver hits the end 1228 * of the receive ring, which tells you exactly when you 1229 * you need to reload the ring pointer. Here we have to 1230 * fake it. I'm mad at myself for not being clever enough 1231 * to avoid the use of a goto here. 1232 */ 1233 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || 1234 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { 1235 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); 1236 xl_wait(sc); 1237 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); 1238 xl_fill_rx_ring(sc); 1239 goto again; 1240 } 1241 } 1242 1243 /* 1244 * A frame was downloaded to the chip. It's safe for us to clean up 1245 * the list buffers. 
 */
/*
 * TX completion for original 3c900/3c905: reclaim transmitted descriptors
 * from the head of the active list back onto the free list, and restart
 * the download engine if it stalled with work still pending.
 */
void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Non-zero DOWNLIST_PTR: the chip is still working. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}
		/* Push the reclaimed descriptor onto the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		ifq_clr_oactive(&ifp->if_snd);
		/* Clear the timeout timer. */
		ifp->if_timer = 0;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/* Work remains; kick the engine if it stalled or lost
		 * its list pointer. */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
			    sc->sc_listkva));
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}

/*
 * TX completion for the 3c905B: the chip sets XL_TXSTAT_DL_COMPLETE in
 * each descriptor, so we can walk cons toward prod and reclaim exactly
 * the finished entries.
 */
void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->sc_arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		if ((cur_tx->xl_ptr->xl_status &
		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
			break;

		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
		}

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* Reclaimed at least one slot: allow queuing again. */
	if (cur_tx != NULL)
		ifq_clr_oactive(&ifp->if_snd);
	if (sc->xl_cdata.xl_tx_cnt == 0)
		ifp->if_timer = 0;
}

/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
 */
void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t txstat;

	/* Drain every latched TX status byte; 0 means no more pending. */
	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			/* 0x90 is suppressed; NOTE(review): presumably a
			 * benign/expected status — confirm against the
			 * 3c90x documentation. */
			if (txstat != 0x90) {
				printf("%s: transmission error: %x\n",
				    sc->sc_dev.dv_xname, txstat);
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				/* Re-point the chip at the first still-
				 * pending descriptor and re-arm polling. */
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->sc_listmap->dm_segs[0].ds_addr +
					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
					    sc->sc_listkva));
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/* On underrun, raise the TX start threshold so the
			 * FIFO fills further before transmission begins. */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
#ifdef notdef
				printf("%s: tx underrun, increasing tx start"
				    " threshold to %d\n", sc->sc_dev.dv_xname,
				    sc->xl_tx_thresh);
#endif
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}

/*
 * Main interrupt handler.  Acks and dispatches every pending interrupt
 * source, then kicks the transmit path if the send queue is non-empty.
 * Returns 1 if this device raised the interrupt, 0 otherwise.
 */
int
xl_intr(void *arg)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	/* 0xFFFF reads back when the card is gone (e.g. cardbus eject). */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		claimed = 1;

		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (!(ifp->if_flags & IFF_RUNNING))
			return (claimed);

		if (status & XL_STAT_UP_COMPLETE)
			xl_rxeof(sc);

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: reinitialize from scratch. */
		if (status & XL_STAT_ADFAIL)
			xl_init(sc);

		if (status & XL_STAT_STATSOFLOW) {
			/* Flag suppresses mii_tick/timeout rescheduling
			 * when called from interrupt context. */
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (!ifq_empty(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}

/*
 * Read and clear the chip's statistics registers, folding the error and
 * collision counts into the interface counters.  Runs from the xl_stsup
 * timeout once a second, and directly on a stats-overflow interrupt
 * (with xl_stats_no_timeout set so it doesn't reschedule itself).
 */
void
xl_stats_update(void *xsc)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero(&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/* Window 6 holds 16 consecutive byte-wide counters; reading
	 * them also clears them in hardware. */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add_sec(&sc->xl_stsup_tmo, 1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 * Returns 0 on success, 1 on failure (the mbuf is freed either way on
 * failure).  On success, ownership of m_head passes to the descriptor.
 */
int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int error, frag, total_len;
	u_int32_t status;
	bus_dmamap_t map;

	map = sc->sc_tx_sparemap;

reload:
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	/* EFBIG (too many segments) is handled below by defragmenting
	 * into a single cluster; anything else is fatal for this packet. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		return (1);
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
		if (frag == XL_MAXFRAGS)
			break;
		total_len += map->dm_segs[frag].ds_len;
		c->xl_ptr->xl_frag[frag].xl_addr =
		    htole32(map->dm_segs[frag].ds_addr);
		c->xl_ptr->xl_frag[frag].xl_len =
		    htole32(map->dm_segs[frag].ds_len);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m_head);
			return (1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				m_freem(m_head);
				return (1);
			}
		}
		/* Linearize the chain into the new mbuf and retry. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		goto reload;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Swap the loaded map in; the descriptor's old map becomes the
	 * new TX spare. */
	c->xl_mbuf = m_head;
	sc->sc_tx_sparemap = c->map;
	c->map = map;
	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(total_len);
	c->xl_ptr->xl_next = 0;

	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		/* Request hardware checksum insertion as flagged by
		 * the stack. */
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct xl_list_data, xl_tx_list[0]),
	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Main
transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
xl_start(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifq_set_oactive(&ifp->if_snd);
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	while (sc->xl_cdata.xl_tx_free != NULL) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; back off to the previous
			 * descriptor and try the next packet. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next =
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);

		}
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/* Append the new chain to the existing one and drop the
		 * old tail's interrupt request bit. */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
		    htole32(~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);
}

/*
 * Transmit start for 3c905B chips: uses the fixed descriptor ring set up
 * by xl_list_tx_init_90xB() and the chip's download-poll mechanism
 * instead of explicitly reloading the downlist pointer.
 */
void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	sc = ifp->if_softc;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few slots in reserve before declaring the
		 * queue full. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Linking the previous descriptor to the new chain makes the
	 * download poller pick it up. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

/*
 * Stop, reset, and (re)initialize the adapter: program the station
 * address, rebuild the RX/TX rings, set thresholds and filters, load
 * the list pointers, enable interrupts, and mark the interface running.
 */
void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s, i;
	struct mii_data *mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	/* Reset the chip to a known state. */
	xl_reset(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Program promiscuous mode and multicast filters. */
	xl_iff(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	/* Call the stats reader directly; suppress its self-rescheduling
	 * timeout while doing so. */
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/* Kick off the once-a-second stats/MII tick. */
	timeout_add_sec(&sc->xl_stsup_tmo, 1);
}

/*
 * Set media options.
 */
int
xl_ifmedia_upd(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct ifmedia *ifm = NULL;
	struct mii_data *mii = NULL;

	sc = ifp->if_softc;

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;
	if (mii == NULL)
		ifm = &sc->ifmedia;
	else
		ifm = &mii->mii_media;

	/* Fixed (non-negotiated) media are programmed directly. */
	switch(IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_10_2:
	case IFM_10_5:
		xl_setmode(sc, ifm->ifm_media);
		return (0);
		break;
	default:
		break;
	}

	/* MII/100baseTX/T4 media require a full reinit to take effect. */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		xl_init(sc);
	} else {
		xl_setmode(sc, ifm->ifm_media);
	}

	return (0);
}

/*
 * Report current media status.
 */
void
xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xl_softc *sc;
	u_int32_t icfg;
	u_int16_t status = 0;
	struct mii_data *mii = NULL;

	sc = ifp->if_softc;
	if (sc->xl_hasmii != 0)
		mii = &sc->sc_mii;

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* Extract the currently selected connector from internal config. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch(icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		/* 3c900B-FL reports AUI but is really 10baseFL. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
		break;
	}
}

/*
 * Interface ioctl handler: address/flags changes, media get/set, and
 * RX ring info.  ENETRESET from a sub-handler means only the RX filter
 * needs reprogramming.
 */
int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;
	struct mii_data *mii = NULL;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xl_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->xl_hasmii != 0)
			mii = &sc->sc_mii;
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xl_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Watchdog: the TX timer expired without completion.  Report it, run
 * the TX/RX reclaim paths, reinitialize the chip, and restart output.
 */
void
xl_watchdog(struct ifnet *ifp)
{
	struct xl_softc *sc;
	u_int16_t status = 0;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	if (status & XL_MEDIASTAT_CARRIER)
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_init(sc);

	if (!ifq_empty(&ifp->if_snd))
		(*ifp->if_start)(ifp);
}

/*
 * Unload DMA maps and free every mbuf attached to the RX and TX rings,
 * then wipe both descriptor lists.
 */
void
xl_freetxrx(struct xl_softc *sc)
{
	bus_dmamap_t map;
	int i;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
			map = sc->xl_cdata.xl_rx_chain[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
			map = sc->xl_cdata.xl_tx_chain[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
xl_stop(struct xl_softc *sc)
{
	struct ifnet *ifp;

	/* Stop the stats updater. */
	timeout_del(&sc->xl_stsup_tmo);

	ifp = &sc->sc_arpcom.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack anything latched and mask all interrupt sources. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	xl_freetxrx(sc);
}

#ifndef SMALL_KERNEL
void
xl_wol_power(struct xl_softc *sc)
{
	/* Re-enable RX and call upper layer WOL power routine
	 * if WOL is enabled. */
	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
		sc->wol_power(sc->wol_power_arg);
	}
}
#endif

void
xl_attach(struct xl_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;
	uint64_t media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	i = splnet();
	xl_reset(sc);
	splx(i);

	/*
	 * Get station address from the EEPROM.
2356 */ 2357 if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) { 2358 printf("\n%s: failed to read station address\n", 2359 sc->sc_dev.dv_xname); 2360 return; 2361 } 2362 memcpy(&sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN); 2363 2364 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data), 2365 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 2366 BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) { 2367 printf(": can't alloc list mem\n"); 2368 return; 2369 } 2370 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 2371 sizeof(struct xl_list_data), &sc->sc_listkva, 2372 BUS_DMA_NOWAIT) != 0) { 2373 printf(": can't map list mem\n"); 2374 return; 2375 } 2376 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1, 2377 sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT, 2378 &sc->sc_listmap) != 0) { 2379 printf(": can't alloc list map\n"); 2380 return; 2381 } 2382 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 2383 sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 2384 printf(": can't load list map\n"); 2385 return; 2386 } 2387 sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva; 2388 2389 for (i = 0; i < XL_RX_LIST_CNT; i++) { 2390 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 2391 0, BUS_DMA_NOWAIT, 2392 &sc->xl_cdata.xl_rx_chain[i].map) != 0) { 2393 printf(": can't create rx map\n"); 2394 return; 2395 } 2396 } 2397 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 2398 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 2399 printf(": can't create rx spare map\n"); 2400 return; 2401 } 2402 2403 for (i = 0; i < XL_TX_LIST_CNT; i++) { 2404 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 2405 XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT, 2406 &sc->xl_cdata.xl_tx_chain[i].map) != 0) { 2407 printf(": can't create tx map\n"); 2408 return; 2409 } 2410 } 2411 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3, 2412 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 2413 
printf(": can't create tx spare map\n"); 2414 return; 2415 } 2416 2417 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 2418 2419 if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) { 2420 u_int16_t n; 2421 2422 XL_SEL_WIN(2); 2423 n = CSR_READ_2(sc, 12); 2424 2425 if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR) 2426 n |= 0x0010; 2427 2428 if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) 2429 n |= 0x4000; 2430 2431 CSR_WRITE_2(sc, 12, n); 2432 } 2433 2434 /* 2435 * Figure out the card type. 3c905B adapters have the 2436 * 'supportsNoTxLength' bit set in the capabilities 2437 * word in the EEPROM. 2438 * Note: my 3c575C cardbus card lies. It returns a value 2439 * of 0x1578 for its capabilities word, which is somewhat 2440 * nonsensical. Another way to distinguish a 3c90x chip 2441 * from a 3c90xB/C chip is to check for the 'supportsLargePackets' 2442 * bit. This will only be set for 3c90x boomerage chips. 2443 */ 2444 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); 2445 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || 2446 !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) 2447 sc->xl_type = XL_TYPE_905B; 2448 else 2449 sc->xl_type = XL_TYPE_90X; 2450 2451 /* Set the TX start threshold for best performance. 
*/ 2452 sc->xl_tx_thresh = XL_MIN_FRAMELEN; 2453 2454 timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc); 2455 2456 ifp->if_softc = sc; 2457 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2458 ifp->if_ioctl = xl_ioctl; 2459 if (sc->xl_type == XL_TYPE_905B) 2460 ifp->if_start = xl_start_90xB; 2461 else 2462 ifp->if_start = xl_start; 2463 ifp->if_watchdog = xl_watchdog; 2464 ifp->if_baudrate = 10000000; 2465 ifq_set_maxlen(&ifp->if_snd, XL_TX_LIST_CNT - 1); 2466 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 2467 2468 ifp->if_capabilities = IFCAP_VLAN_MTU; 2469 2470 #ifndef XL905B_TXCSUM_BROKEN 2471 ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4| 2472 IFCAP_CSUM_UDPv4; 2473 #endif 2474 2475 XL_SEL_WIN(3); 2476 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); 2477 2478 xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); 2479 sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; 2480 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; 2481 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; 2482 2483 xl_mediacheck(sc); 2484 2485 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX 2486 || sc->xl_media & XL_MEDIAOPT_BT4) { 2487 ifmedia_init(&sc->sc_mii.mii_media, 0, 2488 xl_ifmedia_upd, xl_ifmedia_sts); 2489 sc->xl_hasmii = 1; 2490 sc->sc_mii.mii_ifp = ifp; 2491 sc->sc_mii.mii_readreg = xl_miibus_readreg; 2492 sc->sc_mii.mii_writereg = xl_miibus_writereg; 2493 sc->sc_mii.mii_statchg = xl_miibus_statchg; 2494 xl_setcfg(sc); 2495 mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff, 2496 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2497 2498 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2499 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 2500 0, NULL); 2501 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 2502 } 2503 else { 2504 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2505 } 2506 ifm = &sc->sc_mii.mii_media; 2507 } 2508 else { 2509 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); 2510 sc->xl_hasmii = 0; 2511 ifm = 
&sc->ifmedia; 2512 } 2513 2514 /* 2515 * Sanity check. If the user has selected "auto" and this isn't 2516 * a 10/100 card of some kind, we need to force the transceiver 2517 * type to something sane. 2518 */ 2519 if (sc->xl_xcvr == XL_XCVR_AUTO) 2520 xl_choose_xcvr(sc, 0); 2521 2522 if (sc->xl_media & XL_MEDIAOPT_BT) { 2523 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL); 2524 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 2525 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2526 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 2527 } 2528 2529 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { 2530 /* 2531 * Check for a 10baseFL board in disguise. 2532 */ 2533 if (sc->xl_type == XL_TYPE_905B && 2534 sc->xl_media == XL_MEDIAOPT_10FL) { 2535 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL); 2536 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 2537 0, NULL); 2538 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) 2539 ifmedia_add(ifm, 2540 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); 2541 } else { 2542 ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL); 2543 } 2544 } 2545 2546 if (sc->xl_media & XL_MEDIAOPT_BNC) { 2547 ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL); 2548 } 2549 2550 if (sc->xl_media & XL_MEDIAOPT_BFX) { 2551 ifp->if_baudrate = 100000000; 2552 ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL); 2553 } 2554 2555 /* Choose a default media. 
*/ 2556 switch(sc->xl_xcvr) { 2557 case XL_XCVR_10BT: 2558 media = IFM_ETHER|IFM_10_T; 2559 xl_setmode(sc, media); 2560 break; 2561 case XL_XCVR_AUI: 2562 if (sc->xl_type == XL_TYPE_905B && 2563 sc->xl_media == XL_MEDIAOPT_10FL) { 2564 media = IFM_ETHER|IFM_10_FL; 2565 xl_setmode(sc, media); 2566 } else { 2567 media = IFM_ETHER|IFM_10_5; 2568 xl_setmode(sc, media); 2569 } 2570 break; 2571 case XL_XCVR_COAX: 2572 media = IFM_ETHER|IFM_10_2; 2573 xl_setmode(sc, media); 2574 break; 2575 case XL_XCVR_AUTO: 2576 case XL_XCVR_100BTX: 2577 case XL_XCVR_MII: 2578 /* Chosen by miibus */ 2579 break; 2580 case XL_XCVR_100BFX: 2581 media = IFM_ETHER|IFM_100_FX; 2582 xl_setmode(sc, media); 2583 break; 2584 default: 2585 printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, 2586 sc->xl_xcvr); 2587 /* 2588 * This will probably be wrong, but it prevents 2589 * the ifmedia code from panicking. 2590 */ 2591 media = IFM_ETHER | IFM_10_T; 2592 break; 2593 } 2594 2595 if (sc->xl_hasmii == 0) 2596 ifmedia_set(&sc->ifmedia, media); 2597 2598 if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { 2599 XL_SEL_WIN(0); 2600 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); 2601 } 2602 2603 #ifndef SMALL_KERNEL 2604 /* Check availability of WOL. */ 2605 if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) { 2606 ifp->if_capabilities |= IFCAP_WOL; 2607 ifp->if_wol = xl_wol; 2608 xl_wol(ifp, 0); 2609 } 2610 #endif 2611 2612 /* 2613 * Call MI attach routines. 2614 */ 2615 if_attach(ifp); 2616 ether_ifattach(ifp); 2617 } 2618 2619 int 2620 xl_detach(struct xl_softc *sc) 2621 { 2622 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2623 extern void xl_freetxrx(struct xl_softc *); 2624 2625 /* Unhook our tick handler. */ 2626 timeout_del(&sc->xl_stsup_tmo); 2627 2628 xl_freetxrx(sc); 2629 2630 /* Detach all PHYs */ 2631 if (sc->xl_hasmii) 2632 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2633 2634 /* Delete all remaining media. 
*/ 2635 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2636 2637 ether_ifdetach(ifp); 2638 if_detach(ifp); 2639 2640 return (0); 2641 } 2642 2643 #ifndef SMALL_KERNEL 2644 int 2645 xl_wol(struct ifnet *ifp, int enable) 2646 { 2647 struct xl_softc *sc = ifp->if_softc; 2648 2649 XL_SEL_WIN(7); 2650 if (enable) { 2651 if (!(ifp->if_flags & IFF_RUNNING)) 2652 xl_init(sc); 2653 CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC); 2654 sc->xl_flags |= XL_FLAG_WOL; 2655 } else { 2656 CSR_WRITE_2(sc, XL_W7_BM_PME, 0); 2657 sc->xl_flags &= ~XL_FLAG_WOL; 2658 } 2659 return (0); 2660 } 2661 #endif 2662 2663 struct cfdriver xl_cd = { 2664 0, "xl", DV_IFNET 2665 }; 2666