1 /* $OpenBSD: dc.c,v 1.124 2011/07/07 20:42:56 henning Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $ 35 */ 36 37 /* 38 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 39 * series chips and several workalikes including the following: 40 * 41 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 42 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 43 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 44 * ASIX Electronics AX88140A (www.asix.com.tw) 45 * ASIX Electronics AX88141 (www.asix.com.tw) 46 * ADMtek AL981 (www.admtek.com.tw) 47 * ADMtek AN983 (www.admtek.com.tw) 48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 49 * Accton EN1217, EN2242 (www.accton.com) 50 * Xircom X3201 (www.xircom.com) 51 * 52 * Datasheets for the 21143 are available at developer.intel.com. 53 * Datasheets for the clone parts can be found at their respective sites. 54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 55 * The PNIC II is essentially a Macronix 98715A chip; the only difference 56 * worth noting is that its multicast hash table is only 128 bits wide 57 * instead of 512. 58 * 59 * Written by Bill Paul <wpaul@ee.columbia.edu> 60 * Electrical Engineering Department 61 * Columbia University, New York City 62 */ 63 64 /* 65 * The Intel 21143 is the successor to the DEC 21140. It is basically 66 * the same as the 21140 but with a few new features. The 21143 supports 67 * three kinds of media attachments: 68 * 69 * o MII port, for 10Mbps and 100Mbps support and NWAY 70 * autonegotiation provided by an external PHY. 71 * o SYM port, for symbol mode 100Mbps support. 72 * o 10baseT port. 73 * o AUI/BNC port. 74 * 75 * The 100Mbps SYM port and 10baseT port can be used together in 76 * combination with the internal NWAY support to create a 10/100 77 * autosensing configuration. 78 * 79 * Note that not all tulip workalikes are handled in this driver: we only 80 * deal with those which are relatively well behaved. 
The Winbond is 81 * handled separately due to its different register offsets and the 82 * special handling needed for its various bugs. The PNIC is handled 83 * here, but I'm not thrilled about it. 84 * 85 * All of the workalike chips use some form of MII transceiver support 86 * with the exception of the Macronix chips, which also have a SYM port. 87 * The ASIX AX88140A is also documented to have a SYM port, but all 88 * the cards I've seen use an MII transceiver, probably because the 89 * AX88140A doesn't support internal NWAY. 90 */ 91 92 #include "bpfilter.h" 93 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/mbuf.h> 97 #include <sys/protosw.h> 98 #include <sys/socket.h> 99 #include <sys/ioctl.h> 100 #include <sys/errno.h> 101 #include <sys/malloc.h> 102 #include <sys/kernel.h> 103 #include <sys/device.h> 104 #include <sys/timeout.h> 105 106 #include <net/if.h> 107 #include <net/if_dl.h> 108 #include <net/if_types.h> 109 110 #ifdef INET 111 #include <netinet/in.h> 112 #include <netinet/in_systm.h> 113 #include <netinet/in_var.h> 114 #include <netinet/ip.h> 115 #include <netinet/if_ether.h> 116 #endif 117 118 #include <net/if_media.h> 119 120 #if NBPFILTER > 0 121 #include <net/bpf.h> 122 #endif 123 124 #include <dev/mii/mii.h> 125 #include <dev/mii/miivar.h> 126 127 #include <machine/bus.h> 128 #include <dev/pci/pcidevs.h> 129 130 #include <dev/ic/dcreg.h> 131 132 int dc_intr(void *); 133 struct dc_type *dc_devtype(void *); 134 int dc_newbuf(struct dc_softc *, int, struct mbuf *); 135 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *); 136 int dc_coal(struct dc_softc *, struct mbuf **); 137 138 void dc_pnic_rx_bug_war(struct dc_softc *, int); 139 int dc_rx_resync(struct dc_softc *); 140 void dc_rxeof(struct dc_softc *); 141 void dc_txeof(struct dc_softc *); 142 void dc_tick(void *); 143 void dc_tx_underrun(struct dc_softc *); 144 void dc_start(struct ifnet *); 145 int dc_ioctl(struct ifnet *, u_long, caddr_t); 146 void 
dc_watchdog(struct ifnet *); 147 int dc_ifmedia_upd(struct ifnet *); 148 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); 149 150 void dc_delay(struct dc_softc *); 151 void dc_eeprom_width(struct dc_softc *); 152 void dc_eeprom_idle(struct dc_softc *); 153 void dc_eeprom_putbyte(struct dc_softc *, int); 154 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *); 155 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *); 156 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *); 157 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); 158 159 void dc_mii_writebit(struct dc_softc *, int); 160 int dc_mii_readbit(struct dc_softc *); 161 void dc_mii_sync(struct dc_softc *); 162 void dc_mii_send(struct dc_softc *, u_int32_t, int); 163 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *); 164 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *); 165 int dc_miibus_readreg(struct device *, int, int); 166 void dc_miibus_writereg(struct device *, int, int, int); 167 void dc_miibus_statchg(struct device *); 168 169 void dc_setcfg(struct dc_softc *, int); 170 u_int32_t dc_crc_le(struct dc_softc *, caddr_t); 171 u_int32_t dc_crc_be(caddr_t); 172 void dc_setfilt_21143(struct dc_softc *); 173 void dc_setfilt_asix(struct dc_softc *); 174 void dc_setfilt_admtek(struct dc_softc *); 175 void dc_setfilt_xircom(struct dc_softc *); 176 177 void dc_setfilt(struct dc_softc *); 178 179 void dc_reset(struct dc_softc *); 180 int dc_list_rx_init(struct dc_softc *); 181 int dc_list_tx_init(struct dc_softc *); 182 183 void dc_read_srom(struct dc_softc *, int); 184 void dc_parse_21143_srom(struct dc_softc *); 185 void dc_decode_leaf_sia(struct dc_softc *, 186 struct dc_eblock_sia *); 187 void dc_decode_leaf_mii(struct dc_softc *, 188 struct dc_eblock_mii *); 189 void dc_decode_leaf_sym(struct dc_softc *, 190 struct dc_eblock_sym *); 191 void dc_apply_fixup(struct dc_softc *, int); 192 193 #define DC_SETBIT(sc, reg, x) \ 194 
CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 195 196 #define DC_CLRBIT(sc, reg, x) \ 197 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 198 199 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 200 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 201 202 void 203 dc_delay(struct dc_softc *sc) 204 { 205 int idx; 206 207 for (idx = (300 / 33) + 1; idx > 0; idx--) 208 CSR_READ_4(sc, DC_BUSCTL); 209 } 210 211 void 212 dc_eeprom_width(struct dc_softc *sc) 213 { 214 int i; 215 216 /* Force EEPROM to idle state. */ 217 dc_eeprom_idle(sc); 218 219 /* Enter EEPROM access mode. */ 220 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 221 dc_delay(sc); 222 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 223 dc_delay(sc); 224 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 225 dc_delay(sc); 226 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 227 dc_delay(sc); 228 229 for (i = 3; i--;) { 230 if (6 & (1 << i)) 231 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 232 else 233 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 234 dc_delay(sc); 235 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 236 dc_delay(sc); 237 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 238 dc_delay(sc); 239 } 240 241 for (i = 1; i <= 12; i++) { 242 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 243 dc_delay(sc); 244 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 245 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 246 dc_delay(sc); 247 break; 248 } 249 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 250 dc_delay(sc); 251 } 252 253 /* Turn off EEPROM access mode. */ 254 dc_eeprom_idle(sc); 255 256 if (i < 4 || i > 12) 257 sc->dc_romwidth = 6; 258 else 259 sc->dc_romwidth = i; 260 261 /* Enter EEPROM access mode. */ 262 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 263 dc_delay(sc); 264 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 265 dc_delay(sc); 266 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 267 dc_delay(sc); 268 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 269 dc_delay(sc); 270 271 /* Turn off EEPROM access mode. 
*/ 272 dc_eeprom_idle(sc); 273 } 274 275 void 276 dc_eeprom_idle(struct dc_softc *sc) 277 { 278 int i; 279 280 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 281 dc_delay(sc); 282 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 283 dc_delay(sc); 284 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 285 dc_delay(sc); 286 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 287 dc_delay(sc); 288 289 for (i = 0; i < 25; i++) { 290 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 291 dc_delay(sc); 292 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 293 dc_delay(sc); 294 } 295 296 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 297 dc_delay(sc); 298 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 299 dc_delay(sc); 300 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 301 } 302 303 /* 304 * Send a read command and address to the EEPROM, check for ACK. 305 */ 306 void 307 dc_eeprom_putbyte(struct dc_softc *sc, int addr) 308 { 309 int d, i; 310 311 d = DC_EECMD_READ >> 6; 312 313 for (i = 3; i--; ) { 314 if (d & (1 << i)) 315 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 316 else 317 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 318 dc_delay(sc); 319 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 320 dc_delay(sc); 321 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 322 dc_delay(sc); 323 } 324 325 /* 326 * Feed in each bit and strobe the clock. 327 */ 328 for (i = sc->dc_romwidth; i--;) { 329 if (addr & (1 << i)) { 330 SIO_SET(DC_SIO_EE_DATAIN); 331 } else { 332 SIO_CLR(DC_SIO_EE_DATAIN); 333 } 334 dc_delay(sc); 335 SIO_SET(DC_SIO_EE_CLK); 336 dc_delay(sc); 337 SIO_CLR(DC_SIO_EE_CLK); 338 dc_delay(sc); 339 } 340 } 341 342 /* 343 * Read a word of data stored in the EEPROM at address 'addr.' 344 * The PNIC 82c168/82c169 has its own non-standard way to read 345 * the EEPROM. 
346 */ 347 void 348 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 349 { 350 int i; 351 u_int32_t r; 352 353 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); 354 355 for (i = 0; i < DC_TIMEOUT; i++) { 356 DELAY(1); 357 r = CSR_READ_4(sc, DC_SIO); 358 if (!(r & DC_PN_SIOCTL_BUSY)) { 359 *dest = (u_int16_t)(r & 0xFFFF); 360 return; 361 } 362 } 363 } 364 365 /* 366 * Read a word of data stored in the EEPROM at address 'addr.' 367 * The Xircom X3201 has its own non-standard way to read 368 * the EEPROM, too. 369 */ 370 void 371 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 372 { 373 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 374 375 addr *= 2; 376 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 377 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; 378 addr += 1; 379 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 380 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; 381 382 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 383 } 384 385 /* 386 * Read a word of data stored in the EEPROM at address 'addr.' 387 */ 388 void 389 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 390 { 391 int i; 392 u_int16_t word = 0; 393 394 /* Force EEPROM to idle state. */ 395 dc_eeprom_idle(sc); 396 397 /* Enter EEPROM access mode. */ 398 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 399 dc_delay(sc); 400 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 401 dc_delay(sc); 402 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 403 dc_delay(sc); 404 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 405 dc_delay(sc); 406 407 /* 408 * Send address of word we want to read. 409 */ 410 dc_eeprom_putbyte(sc, addr); 411 412 /* 413 * Start reading bits from EEPROM. 414 */ 415 for (i = 0x8000; i; i >>= 1) { 416 SIO_SET(DC_SIO_EE_CLK); 417 dc_delay(sc); 418 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 419 word |= i; 420 dc_delay(sc); 421 SIO_CLR(DC_SIO_EE_CLK); 422 dc_delay(sc); 423 } 424 425 /* Turn off EEPROM access mode. 
*/ 426 dc_eeprom_idle(sc); 427 428 *dest = word; 429 } 430 431 /* 432 * Read a sequence of words from the EEPROM. 433 */ 434 void 435 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, 436 int swap) 437 { 438 int i; 439 u_int16_t word = 0, *ptr; 440 441 for (i = 0; i < cnt; i++) { 442 if (DC_IS_PNIC(sc)) 443 dc_eeprom_getword_pnic(sc, off + i, &word); 444 else if (DC_IS_XIRCOM(sc)) 445 dc_eeprom_getword_xircom(sc, off + i, &word); 446 else 447 dc_eeprom_getword(sc, off + i, &word); 448 ptr = (u_int16_t *)(dest + (i * 2)); 449 if (swap) 450 *ptr = betoh16(word); 451 else 452 *ptr = letoh16(word); 453 } 454 } 455 456 /* 457 * The following two routines are taken from the Macronix 98713 458 * Application Notes pp.19-21. 459 */ 460 /* 461 * Write a bit to the MII bus. 462 */ 463 void 464 dc_mii_writebit(struct dc_softc *sc, int bit) 465 { 466 if (bit) 467 CSR_WRITE_4(sc, DC_SIO, 468 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT); 469 else 470 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 471 472 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 473 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 474 } 475 476 /* 477 * Read a bit from the MII bus. 478 */ 479 int 480 dc_mii_readbit(struct dc_softc *sc) 481 { 482 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR); 483 CSR_READ_4(sc, DC_SIO); 484 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 485 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 486 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 487 return (1); 488 return (0); 489 } 490 491 /* 492 * Sync the PHYs by setting data bit and strobing the clock 32 times. 493 */ 494 void 495 dc_mii_sync(struct dc_softc *sc) 496 { 497 int i; 498 499 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 500 501 for (i = 0; i < 32; i++) 502 dc_mii_writebit(sc, 1); 503 } 504 505 /* 506 * Clock a series of bits through the MII. 
507 */ 508 void 509 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 510 { 511 int i; 512 513 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 514 dc_mii_writebit(sc, bits & i); 515 } 516 517 /* 518 * Read an PHY register through the MII. 519 */ 520 int 521 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 522 { 523 int i, ack, s; 524 525 s = splnet(); 526 527 /* 528 * Set up frame for RX. 529 */ 530 frame->mii_stdelim = DC_MII_STARTDELIM; 531 frame->mii_opcode = DC_MII_READOP; 532 frame->mii_turnaround = 0; 533 frame->mii_data = 0; 534 535 /* 536 * Sync the PHYs. 537 */ 538 dc_mii_sync(sc); 539 540 /* 541 * Send command/address info. 542 */ 543 dc_mii_send(sc, frame->mii_stdelim, 2); 544 dc_mii_send(sc, frame->mii_opcode, 2); 545 dc_mii_send(sc, frame->mii_phyaddr, 5); 546 dc_mii_send(sc, frame->mii_regaddr, 5); 547 548 #ifdef notdef 549 /* Idle bit */ 550 dc_mii_writebit(sc, 1); 551 dc_mii_writebit(sc, 0); 552 #endif 553 554 /* Check for ack */ 555 ack = dc_mii_readbit(sc); 556 557 /* 558 * Now try reading data bits. If the ack failed, we still 559 * need to clock through 16 cycles to keep the PHY(s) in sync. 560 */ 561 if (ack) { 562 for(i = 0; i < 16; i++) { 563 dc_mii_readbit(sc); 564 } 565 goto fail; 566 } 567 568 for (i = 0x8000; i; i >>= 1) { 569 if (!ack) { 570 if (dc_mii_readbit(sc)) 571 frame->mii_data |= i; 572 } 573 } 574 575 fail: 576 577 dc_mii_writebit(sc, 0); 578 dc_mii_writebit(sc, 0); 579 580 splx(s); 581 582 if (ack) 583 return (1); 584 return (0); 585 } 586 587 /* 588 * Write to a PHY register through the MII. 589 */ 590 int 591 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 592 { 593 int s; 594 595 s = splnet(); 596 /* 597 * Set up frame for TX. 598 */ 599 600 frame->mii_stdelim = DC_MII_STARTDELIM; 601 frame->mii_opcode = DC_MII_WRITEOP; 602 frame->mii_turnaround = DC_MII_TURNAROUND; 603 604 /* 605 * Sync the PHYs. 
606 */ 607 dc_mii_sync(sc); 608 609 dc_mii_send(sc, frame->mii_stdelim, 2); 610 dc_mii_send(sc, frame->mii_opcode, 2); 611 dc_mii_send(sc, frame->mii_phyaddr, 5); 612 dc_mii_send(sc, frame->mii_regaddr, 5); 613 dc_mii_send(sc, frame->mii_turnaround, 2); 614 dc_mii_send(sc, frame->mii_data, 16); 615 616 /* Idle bit. */ 617 dc_mii_writebit(sc, 0); 618 dc_mii_writebit(sc, 0); 619 620 splx(s); 621 return (0); 622 } 623 624 int 625 dc_miibus_readreg(struct device *self, int phy, int reg) 626 { 627 struct dc_mii_frame frame; 628 struct dc_softc *sc = (struct dc_softc *)self; 629 int i, rval, phy_reg; 630 631 /* 632 * Note: both the AL981 and AN983 have internal PHYs, 633 * however the AL981 provides direct access to the PHY 634 * registers while the AN983 uses a serial MII interface. 635 * The AN983's MII interface is also buggy in that you 636 * can read from any MII address (0 to 31), but only address 1 637 * behaves normally. To deal with both cases, we pretend 638 * that the PHY is at MII address 1. 639 */ 640 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 641 return (0); 642 643 /* 644 * Note: the ukphy probs of the RS7112 report a PHY at 645 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 646 * so we only respond to correct one. 647 */ 648 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 649 return (0); 650 651 if (sc->dc_pmode != DC_PMODE_MII) { 652 if (phy == (MII_NPHY - 1)) { 653 switch(reg) { 654 case MII_BMSR: 655 /* 656 * Fake something to make the probe 657 * code think there's a PHY here. 
658 */ 659 return (BMSR_MEDIAMASK); 660 break; 661 case MII_PHYIDR1: 662 if (DC_IS_PNIC(sc)) 663 return (PCI_VENDOR_LITEON); 664 return (PCI_VENDOR_DEC); 665 break; 666 case MII_PHYIDR2: 667 if (DC_IS_PNIC(sc)) 668 return (PCI_PRODUCT_LITEON_PNIC); 669 return (PCI_PRODUCT_DEC_21142); 670 break; 671 default: 672 return (0); 673 break; 674 } 675 } else 676 return (0); 677 } 678 679 if (DC_IS_PNIC(sc)) { 680 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 681 (phy << 23) | (reg << 18)); 682 for (i = 0; i < DC_TIMEOUT; i++) { 683 DELAY(1); 684 rval = CSR_READ_4(sc, DC_PN_MII); 685 if (!(rval & DC_PN_MII_BUSY)) { 686 rval &= 0xFFFF; 687 return (rval == 0xFFFF ? 0 : rval); 688 } 689 } 690 return (0); 691 } 692 693 if (DC_IS_COMET(sc)) { 694 switch(reg) { 695 case MII_BMCR: 696 phy_reg = DC_AL_BMCR; 697 break; 698 case MII_BMSR: 699 phy_reg = DC_AL_BMSR; 700 break; 701 case MII_PHYIDR1: 702 phy_reg = DC_AL_VENID; 703 break; 704 case MII_PHYIDR2: 705 phy_reg = DC_AL_DEVID; 706 break; 707 case MII_ANAR: 708 phy_reg = DC_AL_ANAR; 709 break; 710 case MII_ANLPAR: 711 phy_reg = DC_AL_LPAR; 712 break; 713 case MII_ANER: 714 phy_reg = DC_AL_ANER; 715 break; 716 default: 717 printf("%s: phy_read: bad phy register %x\n", 718 sc->sc_dev.dv_xname, reg); 719 return (0); 720 break; 721 } 722 723 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 724 725 if (rval == 0xFFFF) 726 return (0); 727 return (rval); 728 } 729 730 bzero(&frame, sizeof(frame)); 731 732 frame.mii_phyaddr = phy; 733 frame.mii_regaddr = reg; 734 if (sc->dc_type == DC_TYPE_98713) { 735 phy_reg = CSR_READ_4(sc, DC_NETCFG); 736 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 737 } 738 dc_mii_readreg(sc, &frame); 739 if (sc->dc_type == DC_TYPE_98713) 740 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 741 742 return (frame.mii_data); 743 } 744 745 void 746 dc_miibus_writereg(struct device *self, int phy, int reg, int data) 747 { 748 struct dc_softc *sc = (struct dc_softc *)self; 749 struct dc_mii_frame frame; 750 int i, 
phy_reg; 751 752 bzero(&frame, sizeof(frame)); 753 754 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 755 return; 756 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 757 return; 758 759 if (DC_IS_PNIC(sc)) { 760 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 761 (phy << 23) | (reg << 10) | data); 762 for (i = 0; i < DC_TIMEOUT; i++) { 763 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 764 break; 765 } 766 return; 767 } 768 769 if (DC_IS_COMET(sc)) { 770 switch(reg) { 771 case MII_BMCR: 772 phy_reg = DC_AL_BMCR; 773 break; 774 case MII_BMSR: 775 phy_reg = DC_AL_BMSR; 776 break; 777 case MII_PHYIDR1: 778 phy_reg = DC_AL_VENID; 779 break; 780 case MII_PHYIDR2: 781 phy_reg = DC_AL_DEVID; 782 break; 783 case MII_ANAR: 784 phy_reg = DC_AL_ANAR; 785 break; 786 case MII_ANLPAR: 787 phy_reg = DC_AL_LPAR; 788 break; 789 case MII_ANER: 790 phy_reg = DC_AL_ANER; 791 break; 792 default: 793 printf("%s: phy_write: bad phy register %x\n", 794 sc->sc_dev.dv_xname, reg); 795 return; 796 break; 797 } 798 799 CSR_WRITE_4(sc, phy_reg, data); 800 return; 801 } 802 803 frame.mii_phyaddr = phy; 804 frame.mii_regaddr = reg; 805 frame.mii_data = data; 806 807 if (sc->dc_type == DC_TYPE_98713) { 808 phy_reg = CSR_READ_4(sc, DC_NETCFG); 809 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 810 } 811 dc_mii_writereg(sc, &frame); 812 if (sc->dc_type == DC_TYPE_98713) 813 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 814 } 815 816 void 817 dc_miibus_statchg(struct device *self) 818 { 819 struct dc_softc *sc = (struct dc_softc *)self; 820 struct mii_data *mii; 821 struct ifmedia *ifm; 822 823 if (DC_IS_ADMTEK(sc)) 824 return; 825 826 mii = &sc->sc_mii; 827 ifm = &mii->mii_media; 828 if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 829 dc_setcfg(sc, ifm->ifm_media); 830 sc->dc_if_media = ifm->ifm_media; 831 } else { 832 dc_setcfg(sc, mii->mii_media_active); 833 sc->dc_if_media = mii->mii_media_active; 834 } 835 } 836 837 #define DC_BITS_512 9 838 #define 
DC_BITS_128 7 839 #define DC_BITS_64 6 840 841 u_int32_t 842 dc_crc_le(struct dc_softc *sc, caddr_t addr) 843 { 844 u_int32_t crc; 845 846 /* Compute CRC for the address value. */ 847 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 848 849 /* 850 * The hash table on the PNIC II and the MX98715AEC-C/D/E 851 * chips is only 128 bits wide. 852 */ 853 if (sc->dc_flags & DC_128BIT_HASH) 854 return (crc & ((1 << DC_BITS_128) - 1)); 855 856 /* The hash table on the MX98715BEC is only 64 bits wide. */ 857 if (sc->dc_flags & DC_64BIT_HASH) 858 return (crc & ((1 << DC_BITS_64) - 1)); 859 860 /* Xircom's hash filtering table is different (read: weird) */ 861 /* Xircom uses the LEAST significant bits */ 862 if (DC_IS_XIRCOM(sc)) { 863 if ((crc & 0x180) == 0x180) 864 return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4); 865 else 866 return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4); 867 } 868 869 return (crc & ((1 << DC_BITS_512) - 1)); 870 } 871 872 /* 873 * Calculate CRC of a multicast group address, return the lower 6 bits. 874 */ 875 #define dc_crc_be(addr) ((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \ 876 & 0x0000003F) 877 878 /* 879 * 21143-style RX filter setup routine. Filter programming is done by 880 * downloading a special setup frame into the TX engine. 21143, Macronix, 881 * PNIC, PNIC II and Davicom chips are programmed this way. 882 * 883 * We always program the chip using 'hash perfect' mode, i.e. one perfect 884 * address (our node address) and a 512-bit hash filter for multicast 885 * frames. We also sneak the broadcast address into the hash filter since 886 * we need that too. 
887 */ 888 void 889 dc_setfilt_21143(struct dc_softc *sc) 890 { 891 struct dc_desc *sframe; 892 u_int32_t h, *sp; 893 struct arpcom *ac = &sc->sc_arpcom; 894 struct ether_multi *enm; 895 struct ether_multistep step; 896 struct ifnet *ifp; 897 int i; 898 899 ifp = &sc->sc_arpcom.ac_if; 900 901 i = sc->dc_cdata.dc_tx_prod; 902 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 903 sc->dc_cdata.dc_tx_cnt++; 904 sframe = &sc->dc_ldata->dc_tx_list[i]; 905 sp = &sc->dc_ldata->dc_sbuf[0]; 906 bzero(sp, DC_SFRAME_LEN); 907 908 sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr + 909 offsetof(struct dc_list_data, dc_sbuf)); 910 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 911 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 912 913 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = 914 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0]; 915 916 /* If we want promiscuous mode, set the allframes bit. */ 917 if (ifp->if_flags & IFF_PROMISC) 918 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 919 else 920 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 921 922 allmulti: 923 if (ifp->if_flags & IFF_ALLMULTI) 924 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 925 else { 926 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 927 928 ETHER_FIRST_MULTI(step, ac, enm); 929 while (enm != NULL) { 930 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 931 ETHER_ADDR_LEN)) { 932 ifp->if_flags |= IFF_ALLMULTI; 933 goto allmulti; 934 } 935 936 h = dc_crc_le(sc, enm->enm_addrlo); 937 sp[h >> 4] |= htole32(1 << (h & 0xF)); 938 ETHER_NEXT_MULTI(step, enm); 939 } 940 } 941 942 if (ifp->if_flags & IFF_BROADCAST) { 943 h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); 944 sp[h >> 4] |= htole32(1 << (h & 0xF)); 945 } 946 947 /* Set our MAC address */ 948 sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0); 949 sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1); 950 sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2); 951 952 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 953 offsetof(struct dc_list_data, 
dc_sbuf[0]), 954 sizeof(struct dc_list_data) - 955 offsetof(struct dc_list_data, dc_sbuf[0]), 956 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 957 958 sframe->dc_status = htole32(DC_TXSTAT_OWN); 959 960 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 961 offsetof(struct dc_list_data, dc_tx_list[i]), 962 sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 963 964 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 965 966 /* 967 * The PNIC takes an exceedingly long time to process its 968 * setup frame; wait 10ms after posting the setup frame 969 * before proceeding, just so it has time to swallow its 970 * medicine. 971 */ 972 DELAY(10000); 973 974 ifp->if_timer = 5; 975 } 976 977 void 978 dc_setfilt_admtek(struct dc_softc *sc) 979 { 980 struct ifnet *ifp; 981 struct arpcom *ac = &sc->sc_arpcom; 982 struct ether_multi *enm; 983 struct ether_multistep step; 984 int h = 0; 985 u_int32_t hashes[2] = { 0, 0 }; 986 987 ifp = &sc->sc_arpcom.ac_if; 988 989 /* Init our MAC address */ 990 CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 | 991 ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]); 992 CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]); 993 994 /* If we want promiscuous mode, set the allframes bit. */ 995 if (ifp->if_flags & IFF_PROMISC) 996 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 997 else 998 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 999 1000 allmulti: 1001 if (ifp->if_flags & IFF_ALLMULTI) 1002 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1003 else 1004 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1005 1006 /* first, zot all the existing hash bits */ 1007 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1008 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1009 1010 /* 1011 * If we're already in promisc or allmulti mode, we 1012 * don't have to bother programming the multicast filter. 
1013 */ 1014 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1015 return; 1016 1017 /* now program new ones */ 1018 ETHER_FIRST_MULTI(step, ac, enm); 1019 while (enm != NULL) { 1020 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1021 ifp->if_flags |= IFF_ALLMULTI; 1022 goto allmulti; 1023 } 1024 1025 if (DC_IS_CENTAUR(sc)) 1026 h = dc_crc_le(sc, enm->enm_addrlo); 1027 else 1028 h = dc_crc_be(enm->enm_addrlo); 1029 if (h < 32) 1030 hashes[0] |= (1 << h); 1031 else 1032 hashes[1] |= (1 << (h - 32)); 1033 ETHER_NEXT_MULTI(step, enm); 1034 } 1035 1036 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1037 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1038 } 1039 1040 void 1041 dc_setfilt_asix(struct dc_softc *sc) 1042 { 1043 struct ifnet *ifp; 1044 struct arpcom *ac = &sc->sc_arpcom; 1045 struct ether_multi *enm; 1046 struct ether_multistep step; 1047 int h = 0; 1048 u_int32_t hashes[2] = { 0, 0 }; 1049 1050 ifp = &sc->sc_arpcom.ac_if; 1051 1052 /* Init our MAC address */ 1053 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1054 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1055 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0])); 1056 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1057 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1058 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4])); 1059 1060 /* If we want promiscuous mode, set the allframes bit. */ 1061 if (ifp->if_flags & IFF_PROMISC) 1062 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1063 else 1064 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1065 1066 if (ifp->if_flags & IFF_ALLMULTI) 1067 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1068 else 1069 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1070 1071 /* 1072 * The ASIX chip has a special bit to enable reception 1073 * of broadcast frames. 
1074 */ 1075 if (ifp->if_flags & IFF_BROADCAST) 1076 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1077 else 1078 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1079 1080 /* first, zot all the existing hash bits */ 1081 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1082 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1083 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1084 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1085 1086 /* 1087 * If we're already in promisc or allmulti mode, we 1088 * don't have to bother programming the multicast filter. 1089 */ 1090 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1091 return; 1092 1093 /* now program new ones */ 1094 ETHER_FIRST_MULTI(step, ac, enm); 1095 while (enm != NULL) { 1096 h = dc_crc_be(enm->enm_addrlo); 1097 if (h < 32) 1098 hashes[0] |= (1 << h); 1099 else 1100 hashes[1] |= (1 << (h - 32)); 1101 ETHER_NEXT_MULTI(step, enm); 1102 } 1103 1104 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1105 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); 1106 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1107 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); 1108 } 1109 1110 void 1111 dc_setfilt_xircom(struct dc_softc *sc) 1112 { 1113 struct dc_desc *sframe; 1114 struct arpcom *ac = &sc->sc_arpcom; 1115 struct ether_multi *enm; 1116 struct ether_multistep step; 1117 u_int32_t h, *sp; 1118 struct ifnet *ifp; 1119 int i; 1120 1121 ifp = &sc->sc_arpcom.ac_if; 1122 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); 1123 1124 i = sc->dc_cdata.dc_tx_prod; 1125 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1126 sc->dc_cdata.dc_tx_cnt++; 1127 sframe = &sc->dc_ldata->dc_tx_list[i]; 1128 sp = &sc->dc_ldata->dc_sbuf[0]; 1129 bzero(sp, DC_SFRAME_LEN); 1130 1131 sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr + 1132 offsetof(struct dc_list_data, dc_sbuf)); 1133 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1134 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1135 1136 
sc->dc_cdata.dc_tx_chain[i].sd_mbuf = 1137 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0]; 1138 1139 /* If we want promiscuous mode, set the allframes bit. */ 1140 if (ifp->if_flags & IFF_PROMISC) 1141 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1142 else 1143 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1144 1145 if (ifp->if_flags & IFF_ALLMULTI) 1146 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1147 else 1148 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1149 1150 /* now program new ones */ 1151 ETHER_FIRST_MULTI(step, ac, enm); 1152 while (enm != NULL) { 1153 h = dc_crc_le(sc, enm->enm_addrlo); 1154 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1155 ETHER_NEXT_MULTI(step, enm); 1156 } 1157 1158 if (ifp->if_flags & IFF_BROADCAST) { 1159 h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); 1160 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1161 } 1162 1163 /* Set our MAC address */ 1164 sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0); 1165 sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1); 1166 sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2); 1167 1168 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 1169 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 1170 ifp->if_flags |= IFF_RUNNING; 1171 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1172 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1173 1174 /* 1175 * wait some time... 1176 */ 1177 DELAY(1000); 1178 1179 ifp->if_timer = 5; 1180 } 1181 1182 void 1183 dc_setfilt(struct dc_softc *sc) 1184 { 1185 if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || 1186 DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) 1187 dc_setfilt_21143(sc); 1188 1189 if (DC_IS_ASIX(sc)) 1190 dc_setfilt_asix(sc); 1191 1192 if (DC_IS_ADMTEK(sc)) 1193 dc_setfilt_admtek(sc); 1194 1195 if (DC_IS_XIRCOM(sc)) 1196 dc_setfilt_xircom(sc); 1197 } 1198 1199 /* 1200 * In order to fiddle with the 1201 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 1202 * first have to put the transmit and/or receive logic in the idle state. 
 */
void
dc_setcfg(struct dc_softc *sc, int media)
{
    int i, restart = 0;
    u_int32_t isr;

    if (IFM_SUBTYPE(media) == IFM_NONE)
        return;

    /* If TX or RX is running, stop both and wait for them to idle. */
    if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
        restart = 1;
        DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

        for (i = 0; i < DC_TIMEOUT; i++) {
            isr = CSR_READ_4(sc, DC_ISR);
            if (isr & DC_ISR_TX_IDLE &&
                ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
                (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
                break;
            DELAY(10);
        }

        if (i == DC_TIMEOUT) {
            /* Some chips are known not to report idle; don't warn
             * for those (ASIX for TX, broken-rxstate parts for RX). */
            if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
                printf("%s: failed to force tx to idle state\n",
                    sc->sc_dev.dv_xname);
            if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
                (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
                !DC_HAS_BROKEN_RXSTATE(sc))
                printf("%s: failed to force rx to idle state\n",
                    sc->sc_dev.dv_xname);
        }
    }

    if (IFM_SUBTYPE(media) == IFM_100_TX) {
        DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
        if (sc->dc_pmode == DC_PMODE_MII) {
            int watchdogreg;

            if (DC_IS_INTEL(sc)) {
                /* there's a write enable bit here that reads as 1 */
                watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
                watchdogreg &= ~DC_WDOG_CTLWREN;
                watchdogreg |= DC_WDOG_JABBERDIS;
                CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
            } else {
                DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
            }
            DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
                DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
            if (sc->dc_type == DC_TYPE_98713)
                DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
                    DC_NETCFG_SCRAMBLER));
            if (!DC_IS_DAVICOM(sc))
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
            DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
            if (DC_IS_INTEL(sc))
                dc_apply_fixup(sc, IFM_AUTO);
        } else {
            /* SYM (symbol) mode 100Mbps path. */
            if (DC_IS_PNIC(sc)) {
                DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
                DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
                DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
            }
            DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
            DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
            DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
            if (DC_IS_INTEL(sc))
                dc_apply_fixup(sc,
                    (media & IFM_GMASK) == IFM_FDX ?
                    IFM_100_TX|IFM_FDX : IFM_100_TX);
        }
    }

    if (IFM_SUBTYPE(media) == IFM_10_T) {
        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
        DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
        if (sc->dc_pmode == DC_PMODE_MII) {
            int watchdogreg;

            if (DC_IS_INTEL(sc)) {
                /* there's a write enable bit here that reads as 1 */
                watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
                watchdogreg &= ~DC_WDOG_CTLWREN;
                watchdogreg |= DC_WDOG_JABBERDIS;
                CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
            } else {
                DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
            }
            DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
                DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
            if (sc->dc_type == DC_TYPE_98713)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
            if (!DC_IS_DAVICOM(sc))
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
            DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
            if (DC_IS_INTEL(sc))
                dc_apply_fixup(sc, IFM_AUTO);
        } else {
            if (DC_IS_PNIC(sc)) {
                DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
                DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
                DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
            }
            DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
            DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
            DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
            if (DC_IS_INTEL(sc)) {
                /*
                 * Reprogram the SIA for 10baseT with the reset
                 * line held, then release reset.  The magic
                 * 0x7F3D/0x7F3F values differ only in the
                 * full-duplex bits.  NOTE(review): constants
                 * presumably from the 21143 manual — confirm.
                 */
                DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
                DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
                if ((media & IFM_GMASK) == IFM_FDX)
                    DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
                else
                    DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
                DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
                DC_CLRBIT(sc, DC_10BTCTRL,
                    DC_TCTL_AUTONEGENBL);
                dc_apply_fixup(sc,
                    (media & IFM_GMASK) == IFM_FDX ?
                    IFM_10_T|IFM_FDX : IFM_10_T);
                DELAY(20000);
            }
        }
    }

    /*
     * If this is a Davicom DM9102A card with a DM9801 HomePNA
     * PHY and we want HomePNA mode, set the portsel bit to turn
     * on the external MII port.
     */
    if (DC_IS_DAVICOM(sc)) {
        if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
            DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
            sc->dc_link = 1;
        } else {
            DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
        }
    }

    if ((media & IFM_GMASK) == IFM_FDX) {
        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
        if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
            DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
    } else {
        DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
        if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
            DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
    }

    /* Restart TX/RX if we stopped them above. */
    if (restart)
        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}

/*
 * Issue a software reset and reinitialize the bus/interrupt/netconfig
 * registers to a known state.
 */
void
dc_reset(struct dc_softc *sc)
{
    int i;

    DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

    /* Wait for the reset bit to self-clear. */
    for (i = 0; i < DC_TIMEOUT; i++) {
        DELAY(10);
        if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
            break;
    }

    /*
     * Several families never clear the bit on their own;
     * clear it manually and suppress the timeout warning.
     */
    if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
        DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
        DELAY(10000);
        DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
        i = 0;
    }

    if (i == DC_TIMEOUT)
        printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

    /* Wait a little while for the chip to get its brains in order. */
    DELAY(1000);

    CSR_WRITE_4(sc, DC_IMR, 0x00000000);
    CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
    CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

    /*
     * Bring the SIA out of reset.
In some cases, it looks
 * like failing to unreset the SIA soon enough gets it
 * into a state where it will never come out of reset
 * until we reset the whole chip again.
 */
    if (DC_IS_INTEL(sc)) {
        DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
        CSR_WRITE_4(sc, DC_10BTCTRL, 0);
        CSR_WRITE_4(sc, DC_WATCHDOG, 0);
    }

    if (sc->dc_type == DC_TYPE_21145)
        dc_setcfg(sc, IFM_10_T);
}

/*
 * Play back the GPIO reset and general-purpose register sequences that
 * were recorded from the SROM for the given media type.  Each sequence
 * entry is a little-endian 16-bit value written into the upper half of
 * the watchdog/GPIO register.
 */
void
dc_apply_fixup(struct dc_softc *sc, int media)
{
    struct dc_mediainfo *m;
    u_int8_t *p;
    int i;
    u_int32_t reg;

    m = sc->dc_mi;

    /* Find the media info entry matching the requested media. */
    while (m != NULL) {
        if (m->dc_media == media)
            break;
        m = m->dc_next;
    }

    /* No fixup recorded for this media: nothing to do. */
    if (m == NULL)
        return;

    for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
        reg = (p[0] | (p[1] << 8)) << 16;
        CSR_WRITE_4(sc, DC_WATCHDOG, reg);
    }

    for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
        reg = (p[0] | (p[1] << 8)) << 16;
        CSR_WRITE_4(sc, DC_WATCHDOG, reg);
    }
}

/*
 * Parse an SROM SIA media block and prepend a corresponding entry to
 * the softc's media info list.  Allocation failure is silently ignored
 * (the media simply goes unrecorded).
 */
void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
    struct dc_mediainfo *m;

    m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (m == NULL)
        return;
    switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
    case DC_SIA_CODE_10BT:
        m->dc_media = IFM_10_T;
        break;
    case DC_SIA_CODE_10BT_FDX:
        m->dc_media = IFM_10_T|IFM_FDX;
        break;
    case DC_SIA_CODE_10B2:
        m->dc_media = IFM_10_2;
        break;
    case DC_SIA_CODE_10B5:
        m->dc_media = IFM_10_5;
        break;
    default:
        break;
    }

    /*
     * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
     * Things apparently already work for cards that do
     * supply Media Specific Data.
     */
    if (l->dc_sia_code & DC_SIA_CODE_EXT) {
        m->dc_gp_len = 2;
        m->dc_gp_ptr =
            (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
    } else {
        m->dc_gp_len = 2;
        m->dc_gp_ptr =
            (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
    }

    m->dc_next = sc->dc_mi;
    sc->dc_mi = m;

    sc->dc_pmode = DC_PMODE_SIA;
}

/*
 * Parse an SROM SYM (symbol mode 100Mbps) media block and prepend it
 * to the media info list.
 */
void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
    struct dc_mediainfo *m;

    m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (m == NULL)
        return;
    if (l->dc_sym_code == DC_SYM_CODE_100BT)
        m->dc_media = IFM_100_TX;

    if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
        m->dc_media = IFM_100_TX|IFM_FDX;

    m->dc_gp_len = 2;
    m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

    m->dc_next = sc->dc_mi;
    sc->dc_mi = m;

    sc->dc_pmode = DC_PMODE_SYM;
}

/*
 * Parse an SROM MII media block.  The block layout walked here is:
 * header, then dc_gpr_len 16-bit GP entries, then a one-byte reset
 * sequence length, then the reset entries themselves.
 */
void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
    u_int8_t *p;
    struct dc_mediainfo *m;

    m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (m == NULL)
        return;
    /* We abuse IFM_AUTO to represent MII.
*/ 1590 break; 1591 } 1592 ptr += (hdr->dc_len & 0x7F); 1593 ptr++; 1594 } 1595 } 1596 1597 /* 1598 * Attach the interface. Allocate softc structures, do ifmedia 1599 * setup and ethernet/BPF attach. 1600 */ 1601 void 1602 dc_attach(struct dc_softc *sc) 1603 { 1604 struct ifnet *ifp; 1605 int mac_offset, tmp, i; 1606 u_int32_t reg; 1607 1608 /* 1609 * Get station address from the EEPROM. 1610 */ 1611 if (sc->sc_hasmac) 1612 goto hasmac; 1613 1614 switch(sc->dc_type) { 1615 case DC_TYPE_98713: 1616 case DC_TYPE_98713A: 1617 case DC_TYPE_987x5: 1618 case DC_TYPE_PNICII: 1619 dc_read_eeprom(sc, (caddr_t)&mac_offset, 1620 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 1621 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1622 (mac_offset / 2), 3, 0); 1623 break; 1624 case DC_TYPE_PNIC: 1625 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1); 1626 break; 1627 case DC_TYPE_DM9102: 1628 case DC_TYPE_21143: 1629 case DC_TYPE_21145: 1630 case DC_TYPE_ASIX: 1631 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1632 DC_EE_NODEADDR, 3, 0); 1633 break; 1634 case DC_TYPE_AL981: 1635 case DC_TYPE_AN983: 1636 reg = CSR_READ_4(sc, DC_AL_PAR0); 1637 sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff); 1638 sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff; 1639 sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff; 1640 sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff; 1641 reg = CSR_READ_4(sc, DC_AL_PAR1); 1642 sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff); 1643 sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff; 1644 break; 1645 case DC_TYPE_CONEXANT: 1646 bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR, 1647 &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 1648 break; 1649 case DC_TYPE_XIRCOM: 1650 /* Some newer units have the MAC at offset 8 */ 1651 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0); 1652 1653 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 && 1654 sc->sc_arpcom.ac_enaddr[1] == 0x10 && 1655 sc->sc_arpcom.ac_enaddr[2] == 0xa4) 1656 break; 1657 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 && 
1658 sc->sc_arpcom.ac_enaddr[1] == 0x80 && 1659 sc->sc_arpcom.ac_enaddr[2] == 0xc7) 1660 break; 1661 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0); 1662 break; 1663 default: 1664 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1665 DC_EE_NODEADDR, 3, 0); 1666 break; 1667 } 1668 hasmac: 1669 1670 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data), 1671 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 1672 BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) { 1673 printf(": can't alloc list mem\n"); 1674 goto fail; 1675 } 1676 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 1677 sizeof(struct dc_list_data), &sc->sc_listkva, 1678 BUS_DMA_NOWAIT) != 0) { 1679 printf(": can't map list mem\n"); 1680 goto fail; 1681 } 1682 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1, 1683 sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT, 1684 &sc->sc_listmap) != 0) { 1685 printf(": can't alloc list map\n"); 1686 goto fail; 1687 } 1688 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 1689 sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 1690 printf(": can't load list map\n"); 1691 goto fail; 1692 } 1693 sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva; 1694 1695 for (i = 0; i < DC_RX_LIST_CNT; i++) { 1696 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 1697 0, BUS_DMA_NOWAIT, 1698 &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) { 1699 printf(": can't create rx map\n"); 1700 return; 1701 } 1702 } 1703 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1704 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 1705 printf(": can't create rx spare map\n"); 1706 return; 1707 } 1708 1709 for (i = 0; i < DC_TX_LIST_CNT; i++) { 1710 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1711 DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT, 1712 &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) { 1713 printf(": can't create tx map\n"); 1714 return; 1715 } 1716 } 1717 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 
DC_TX_LIST_CNT - 5, 1718 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 1719 printf(": can't create tx spare map\n"); 1720 return; 1721 } 1722 1723 /* 1724 * A 21143 or clone chip was detected. Inform the world. 1725 */ 1726 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 1727 1728 ifp = &sc->sc_arpcom.ac_if; 1729 ifp->if_softc = sc; 1730 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1731 ifp->if_ioctl = dc_ioctl; 1732 ifp->if_start = dc_start; 1733 ifp->if_watchdog = dc_watchdog; 1734 ifp->if_baudrate = 10000000; 1735 IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1); 1736 IFQ_SET_READY(&ifp->if_snd); 1737 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1738 1739 ifp->if_capabilities = IFCAP_VLAN_MTU; 1740 1741 /* Do MII setup. If this is a 21143, check for a PHY on the 1742 * MII bus after applying any necessary fixups to twiddle the 1743 * GPIO bits. If we don't end up finding a PHY, restore the 1744 * old selection (SIA only or SIA/SYM) and attach the dcphy 1745 * driver instead. 1746 */ 1747 if (DC_IS_INTEL(sc)) { 1748 dc_apply_fixup(sc, IFM_AUTO); 1749 tmp = sc->dc_pmode; 1750 sc->dc_pmode = DC_PMODE_MII; 1751 } 1752 1753 /* 1754 * Setup General Purpose port mode and data so the tulip can talk 1755 * to the MII. This needs to be done before mii_attach so that 1756 * we can actually see them. 
1757 */ 1758 if (DC_IS_XIRCOM(sc)) { 1759 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 1760 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 1761 DELAY(10); 1762 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 1763 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 1764 DELAY(10); 1765 } 1766 1767 sc->sc_mii.mii_ifp = ifp; 1768 sc->sc_mii.mii_readreg = dc_miibus_readreg; 1769 sc->sc_mii.mii_writereg = dc_miibus_writereg; 1770 sc->sc_mii.mii_statchg = dc_miibus_statchg; 1771 ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts); 1772 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1773 MII_OFFSET_ANY, 0); 1774 1775 if (DC_IS_INTEL(sc)) { 1776 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { 1777 sc->dc_pmode = tmp; 1778 if (sc->dc_pmode != DC_PMODE_SIA) 1779 sc->dc_pmode = DC_PMODE_SYM; 1780 sc->dc_flags |= DC_21143_NWAY; 1781 if (sc->dc_flags & DC_MOMENCO_BOTCH) 1782 sc->dc_pmode = DC_PMODE_MII; 1783 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, 1784 MII_PHY_ANY, MII_OFFSET_ANY, 0); 1785 } else { 1786 /* we have a PHY, so we must clear this bit */ 1787 sc->dc_flags &= ~DC_TULIP_LEDS; 1788 } 1789 } 1790 1791 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { 1792 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 1793 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 1794 printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname); 1795 } else if (sc->dc_type == DC_TYPE_21145) { 1796 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T); 1797 } else 1798 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1799 1800 if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A) 1801 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL); 1802 1803 if (DC_IS_ADMTEK(sc)) { 1804 /* 1805 * Set automatic TX underrun recovery for the ADMtek chips 1806 */ 1807 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 1808 } 1809 1810 /* 1811 * Call MI attach routines. 
1812 */ 1813 if_attach(ifp); 1814 ether_ifattach(ifp); 1815 1816 fail: 1817 return; 1818 } 1819 1820 /* 1821 * Initialize the transmit descriptors. 1822 */ 1823 int 1824 dc_list_tx_init(struct dc_softc *sc) 1825 { 1826 struct dc_chain_data *cd; 1827 struct dc_list_data *ld; 1828 int i; 1829 bus_addr_t next; 1830 1831 cd = &sc->dc_cdata; 1832 ld = sc->dc_ldata; 1833 for (i = 0; i < DC_TX_LIST_CNT; i++) { 1834 next = sc->sc_listmap->dm_segs[0].ds_addr; 1835 if (i == (DC_TX_LIST_CNT - 1)) 1836 next += 1837 offsetof(struct dc_list_data, dc_tx_list[0]); 1838 else 1839 next += 1840 offsetof(struct dc_list_data, dc_tx_list[i + 1]); 1841 cd->dc_tx_chain[i].sd_mbuf = NULL; 1842 ld->dc_tx_list[i].dc_data = htole32(0); 1843 ld->dc_tx_list[i].dc_ctl = htole32(0); 1844 ld->dc_tx_list[i].dc_next = htole32(next); 1845 } 1846 1847 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 1848 1849 return (0); 1850 } 1851 1852 1853 /* 1854 * Initialize the RX descriptors and allocate mbufs for them. Note that 1855 * we arrange the descriptors in a closed ring, so that the last descriptor 1856 * points back to the first. 1857 */ 1858 int 1859 dc_list_rx_init(struct dc_softc *sc) 1860 { 1861 struct dc_chain_data *cd; 1862 struct dc_list_data *ld; 1863 int i; 1864 bus_addr_t next; 1865 1866 cd = &sc->dc_cdata; 1867 ld = sc->dc_ldata; 1868 1869 for (i = 0; i < DC_RX_LIST_CNT; i++) { 1870 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 1871 return (ENOBUFS); 1872 next = sc->sc_listmap->dm_segs[0].ds_addr; 1873 if (i == (DC_RX_LIST_CNT - 1)) 1874 next += 1875 offsetof(struct dc_list_data, dc_rx_list[0]); 1876 else 1877 next += 1878 offsetof(struct dc_list_data, dc_rx_list[i + 1]); 1879 ld->dc_rx_list[i].dc_next = htole32(next); 1880 } 1881 1882 cd->dc_rx_prod = 0; 1883 1884 return (0); 1885 } 1886 1887 /* 1888 * Initialize an RX descriptor and attach an MBUF cluster. 
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
    struct mbuf *m_new = NULL;
    struct dc_desc *c;
    bus_dmamap_t map;

    c = &sc->dc_ldata->dc_rx_list[i];

    if (m == NULL) {
        /* Allocate a fresh cluster mbuf and DMA-load it. */
        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
        if (m_new == NULL)
            return (ENOBUFS);

        MCLGET(m_new, M_DONTWAIT);
        if (!(m_new->m_flags & M_EXT)) {
            m_freem(m_new);
            return (ENOBUFS);
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
            m_new, BUS_DMA_NOWAIT) != 0) {
            m_freem(m_new);
            return (ENOBUFS);
        }
        /* Swap the freshly-loaded spare map into this slot. */
        map = sc->dc_cdata.dc_rx_chain[i].sd_map;
        sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
        sc->sc_rx_sparemap = map;
    } else {
        /*
         * We're re-using a previously allocated mbuf;
         * be sure to re-init pointers and lengths to
         * default values.
         */
        m_new = m;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        m_new->m_data = m_new->m_ext.ext_buf;
    }

    /* Leave 8 bytes of headroom; dc_data below skips them too. */
    m_adj(m_new, sizeof(u_int64_t));

    /*
     * If this is a PNIC chip, zero the buffer. This is part
     * of the workaround for the receive bug in the 82c168 and
     * 82c169 chips.
     */
    if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
        bzero(mtod(m_new, char *), m_new->m_len);

    bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
        sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
        BUS_DMASYNC_PREREAD);

    sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
    c->dc_data = htole32(
        sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
        sizeof(u_int64_t));
    c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
    /* Hand the descriptor back to the chip. */
    c->dc_status = htole32(DC_RXSTAT_OWN);

    bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
        offsetof(struct dc_list_data, dc_rx_list[i]),
        sizeof(struct dc_desc),
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */

#define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
    struct dc_desc *cur_rx;
    struct dc_desc *c = NULL;
    struct mbuf *m = NULL;
    unsigned char *ptr;
    int i, total_len;
    u_int32_t rxstat = 0;

    /* Start from the descriptor where the bogus upload began. */
    i = sc->dc_pnic_rx_bug_save;
    cur_rx = &sc->dc_ldata->dc_rx_list[idx];
    ptr = sc->dc_pnic_rx_buf;
    bzero(ptr, ETHER_MAX_DIX_LEN * 5);

    /* Copy all the bytes from the bogus buffers. */
    while (1) {
        c = &sc->dc_ldata->dc_rx_list[i];
        rxstat = letoh32(c->dc_status);
        m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
        bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
        ptr += ETHER_MAX_DIX_LEN;
        /* If this is the last buffer, break out. */
        if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
            break;
        /* Recycle intermediate buffers back to the chip. */
        dc_newbuf(sc, i, m);
        DC_INC(i, DC_RX_LIST_CNT);
    }

    /* Find the length of the actual receive frame. */
    total_len = DC_RXBYTES(rxstat);

    /* Scan backwards until we hit a non-zero byte. */
    while (*ptr == 0x00)
        ptr--;

    /* Round off. */
    if ((unsigned long)(ptr) & 0x3)
        ptr -= 1;

    /* Now find the start of the frame. */
    ptr -= total_len;
    if (ptr < sc->dc_pnic_rx_buf)
        ptr = sc->dc_pnic_rx_buf;

    /*
     * Now copy the salvaged frame to the last mbuf and fake up
     * the status word to make it look like a successful
     * frame reception.
     */
    dc_newbuf(sc, i, m);
    bcopy(ptr, mtod(m, char *), total_len);
    cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
int
dc_rxeof(struct dc_softc *sc);
int
dc_rx_resync(struct dc_softc *sc)
{
    u_int32_t stat;
    int i, pos, offset;

    pos = sc->dc_cdata.dc_rx_prod;

    /* Walk forward from the current producer looking for a
     * descriptor the chip has handed back to us. */
    for (i = 0; i < DC_RX_LIST_CNT; i++) {

        offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offset, sizeof(struct dc_desc),
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
        if (!(stat & htole32(DC_RXSTAT_OWN)))
            break;
        DC_INC(pos, DC_RX_LIST_CNT);
    }

    /* If the ring really is empty, then just return. */
    if (i == DC_RX_LIST_CNT)
        return (0);

    /* We've fallen behind the chip: catch it. */
    sc->dc_cdata.dc_rx_prod = pos;

    return (EAGAIN);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
dc_rxeof(struct dc_softc *sc)
{
    struct mbuf *m;
    struct ifnet *ifp;
    struct dc_desc *cur_rx;
    int i, offset, total_len = 0;
    u_int32_t rxstat;

    ifp = &sc->sc_arpcom.ac_if;
    i = sc->dc_cdata.dc_rx_prod;

    for (;;) {
        struct mbuf *m0 = NULL;

        offset = offsetof(struct dc_list_data, dc_rx_list[i]);
        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offset, sizeof(struct dc_desc),
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cur_rx = &sc->dc_ldata->dc_rx_list[i];
        rxstat = letoh32(cur_rx->dc_status);
        /* Chip still owns this descriptor: no more frames. */
        if (rxstat & DC_RXSTAT_OWN)
            break;

        m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
        total_len = DC_RXBYTES(rxstat);

        bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
            0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD);

        if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
            /* Multi-fragment upload: trigger the PNIC salvage
             * code once the last fragment arrives. */
            if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
                if (rxstat & DC_RXSTAT_FIRSTFRAG)
                    sc->dc_pnic_rx_bug_save = i;
                if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
                    DC_INC(i, DC_RX_LIST_CNT);
                    continue;
                }
                dc_pnic_rx_bug_war(sc, i);
                rxstat = letoh32(cur_rx->dc_status);
                total_len = DC_RXBYTES(rxstat);
            }
        }

        sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

        /*
         * If an error occurs, update stats, clear the
         * status word and leave the mbuf cluster in place:
         * it should simply get re-used next time this descriptor
         * comes up in the ring. However, don't report long
         * frames as errors since they could be VLANs.
         */
        if ((rxstat & DC_RXSTAT_RXERR)) {
            if (!(rxstat & DC_RXSTAT_GIANT) ||
                (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
                DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
                DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
                ifp->if_ierrors++;
                if (rxstat & DC_RXSTAT_COLLSEEN)
                    ifp->if_collisions++;
                dc_newbuf(sc, i, m);
                if (rxstat & DC_RXSTAT_CRCERR) {
                    DC_INC(i, DC_RX_LIST_CNT);
                    continue;
                } else {
                    /* Non-CRC errors: reinitialize. */
                    dc_init(sc);
                    return;
                }
            }
        }

        /* No errors; receive the packet. */
        total_len -= ETHER_CRC_LEN;

        m->m_pkthdr.rcvif = ifp;
        /* Copy into a fresh aligned chain; recycle the cluster. */
        m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
            ifp, NULL);
        dc_newbuf(sc, i, m);
        DC_INC(i, DC_RX_LIST_CNT);
        if (m0 == NULL) {
            ifp->if_ierrors++;
            continue;
        }
        m = m0;

        ifp->if_ipackets++;
#if NBPFILTER > 0
        if (ifp->if_bpf)
            bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
        ether_input_mbuf(ifp, m);
    }

    sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
dc_txeof(struct dc_softc *sc)
{
    struct dc_desc *cur_tx = NULL;
    struct ifnet *ifp;
    int idx, offset;

    ifp = &sc->sc_arpcom.ac_if;

    /*
     * Go through our tx list and free mbufs for those
     * frames that have been transmitted.
     */
    idx = sc->dc_cdata.dc_tx_cons;
    while (idx != sc->dc_cdata.dc_tx_prod) {
        u_int32_t txstat;

        offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offset, sizeof(struct dc_desc),
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cur_tx = &sc->dc_ldata->dc_tx_list[idx];
        txstat = letoh32(cur_tx->dc_status);

        /* Chip still owns this descriptor: stop here. */
        if (txstat & DC_TXSTAT_OWN)
            break;

        /* Intermediate fragments and setup frames carry no mbuf
         * to free; just account for them and move on. */
        if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
            cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
            if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
                /*
                 * Yes, the PNIC is so brain damaged
                 * that it will sometimes generate a TX
                 * underrun error while DMAing the RX
                 * filter setup frame. If we detect this,
                 * we have to send the setup frame again,
                 * or else the filter won't be programmed
                 * correctly.
                 */
                if (DC_IS_PNIC(sc)) {
                    if (txstat & DC_TXSTAT_ERRSUM)
                        dc_setfilt(sc);
                }
                sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
            }
            sc->dc_cdata.dc_tx_cnt--;
            DC_INC(idx, DC_TX_LIST_CNT);
            continue;
        }

        if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
            /*
             * XXX: Why does my Xircom taunt me so?
             * For some reason it likes setting the CARRLOST flag
             * even when the carrier is there. wtf?!
             * Who knows, but Conexant chips have the
             * same problem. Maybe they took lessons
             * from Xircom.
             */
            if (/*sc->dc_type == DC_TYPE_21143 &&*/
                sc->dc_pmode == DC_PMODE_MII &&
                ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
                DC_TXSTAT_NOCARRIER)))
                txstat &= ~DC_TXSTAT_ERRSUM;
        } else {
            if (/*sc->dc_type == DC_TYPE_21143 &&*/
                sc->dc_pmode == DC_PMODE_MII &&
                ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
                DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
                txstat &= ~DC_TXSTAT_ERRSUM;
        }

        if (txstat & DC_TXSTAT_ERRSUM) {
            ifp->if_oerrors++;
            if (txstat & DC_TXSTAT_EXCESSCOLL)
                ifp->if_collisions++;
            if (txstat & DC_TXSTAT_LATECOLL)
                ifp->if_collisions++;
            /* Anything but an underrun needs a full reinit. */
            if (!(txstat & DC_TXSTAT_UNDERRUN)) {
                dc_init(sc);
                return;
            }
        }

        ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

        ifp->if_opackets++;
        if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
            bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

            bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_dmat, map);
        }
        if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
            m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
            sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offset, sizeof(struct dc_desc),
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        sc->dc_cdata.dc_tx_cnt--;
        DC_INC(idx, DC_TX_LIST_CNT);
    }
    sc->dc_cdata.dc_tx_cons = idx;

    /* Reopen the TX queue once enough descriptors are free. */
    if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
        ifp->if_flags &= ~IFF_OACTIVE;
    if (sc->dc_cdata.dc_tx_cnt == 0)
        ifp->if_timer = 0;
}

void
dc_tick(void *xsc)
{
    struct dc_softc *sc = (struct dc_softc *)xsc;
    struct mii_data *mii;
    struct ifnet *ifp;
    int s;
    u_int32_t r;

    s = splnet();

    ifp = &sc->sc_arpcom.ac_if;
    mii = &sc->sc_mii;

    if (sc->dc_flags & DC_REDUCED_MII_POLL) {
        if
(sc->dc_flags & DC_21143_NWAY) { 2347 r = CSR_READ_4(sc, DC_10BTSTAT); 2348 if (IFM_SUBTYPE(mii->mii_media_active) == 2349 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2350 sc->dc_link = 0; 2351 mii_mediachg(mii); 2352 } 2353 if (IFM_SUBTYPE(mii->mii_media_active) == 2354 IFM_10_T && (r & DC_TSTAT_LS10)) { 2355 sc->dc_link = 0; 2356 mii_mediachg(mii); 2357 } 2358 if (sc->dc_link == 0) 2359 mii_tick(mii); 2360 } else { 2361 /* 2362 * For NICs which never report DC_RXSTATE_WAIT, we 2363 * have to bite the bullet... 2364 */ 2365 if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc, 2366 DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && 2367 sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) { 2368 mii_tick(mii); 2369 if (!(mii->mii_media_status & IFM_ACTIVE)) 2370 sc->dc_link = 0; 2371 } 2372 } 2373 } else 2374 mii_tick(mii); 2375 2376 /* 2377 * When the init routine completes, we expect to be able to send 2378 * packets right away, and in fact the network code will send a 2379 * gratuitous ARP the moment the init routine marks the interface 2380 * as running. However, even though the MAC may have been initialized, 2381 * there may be a delay of a few seconds before the PHY completes 2382 * autonegotiation and the link is brought up. Any transmissions 2383 * made during that delay will be lost. Dealing with this is tricky: 2384 * we can't just pause in the init routine while waiting for the 2385 * PHY to come ready since that would bring the whole system to 2386 * a screeching halt for several seconds. 2387 * 2388 * What we do here is prevent the TX start routine from sending 2389 * any packets until a link has been established. After the 2390 * interface has been initialized, the tick routine will poll 2391 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2392 * that time, packets will stay in the send queue, and once the 2393 * link comes up, they will be flushed out to the wire. 
2394 */ 2395 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2396 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2397 sc->dc_link++; 2398 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2399 dc_start(ifp); 2400 } 2401 2402 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2403 timeout_add_msec(&sc->dc_tick_tmo, 100); 2404 else 2405 timeout_add_sec(&sc->dc_tick_tmo, 1); 2406 2407 splx(s); 2408 } 2409 2410 /* A transmit underrun has occurred. Back off the transmit threshold, 2411 * or switch to store and forward mode if we have to. 2412 */ 2413 void 2414 dc_tx_underrun(struct dc_softc *sc) 2415 { 2416 u_int32_t isr; 2417 int i; 2418 2419 if (DC_IS_DAVICOM(sc)) 2420 dc_init(sc); 2421 2422 if (DC_IS_INTEL(sc)) { 2423 /* 2424 * The real 21143 requires that the transmitter be idle 2425 * in order to change the transmit threshold or store 2426 * and forward state. 2427 */ 2428 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2429 2430 for (i = 0; i < DC_TIMEOUT; i++) { 2431 isr = CSR_READ_4(sc, DC_ISR); 2432 if (isr & DC_ISR_TX_IDLE) 2433 break; 2434 DELAY(10); 2435 } 2436 if (i == DC_TIMEOUT) { 2437 printf("%s: failed to force tx to idle state\n", 2438 sc->sc_dev.dv_xname); 2439 dc_init(sc); 2440 } 2441 } 2442 2443 sc->dc_txthresh += DC_TXTHRESH_INC; 2444 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2445 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2446 } else { 2447 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2448 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2449 } 2450 2451 if (DC_IS_INTEL(sc)) 2452 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2453 2454 return; 2455 } 2456 2457 int 2458 dc_intr(void *arg) 2459 { 2460 struct dc_softc *sc; 2461 struct ifnet *ifp; 2462 u_int32_t status, ints; 2463 int claimed = 0; 2464 2465 sc = arg; 2466 2467 ifp = &sc->sc_arpcom.ac_if; 2468 2469 ints = CSR_READ_4(sc, DC_ISR); 2470 if ((ints & DC_INTRS) == 0) 2471 return (claimed); 2472 if (ints == 0xffffffff) 2473 return (0); 2474 2475 /* Suppress unwanted interrupts */ 2476 if 
(!(ifp->if_flags & IFF_UP)) { 2477 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 2478 dc_stop(sc, 0); 2479 return (claimed); 2480 } 2481 2482 /* Disable interrupts. */ 2483 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2484 2485 while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 2486 status != 0xFFFFFFFF && 2487 (ifp->if_flags & IFF_RUNNING)) { 2488 2489 claimed = 1; 2490 CSR_WRITE_4(sc, DC_ISR, status); 2491 2492 if (status & DC_ISR_RX_OK) { 2493 int curpkts; 2494 curpkts = ifp->if_ipackets; 2495 dc_rxeof(sc); 2496 if (curpkts == ifp->if_ipackets) { 2497 while(dc_rx_resync(sc)) 2498 dc_rxeof(sc); 2499 } 2500 } 2501 2502 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2503 dc_txeof(sc); 2504 2505 if (status & DC_ISR_TX_IDLE) { 2506 dc_txeof(sc); 2507 if (sc->dc_cdata.dc_tx_cnt) { 2508 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2509 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2510 } 2511 } 2512 2513 if (status & DC_ISR_TX_UNDERRUN) 2514 dc_tx_underrun(sc); 2515 2516 if ((status & DC_ISR_RX_WATDOGTIMEO) 2517 || (status & DC_ISR_RX_NOBUF)) { 2518 int curpkts; 2519 curpkts = ifp->if_ipackets; 2520 dc_rxeof(sc); 2521 if (curpkts == ifp->if_ipackets) { 2522 while(dc_rx_resync(sc)) 2523 dc_rxeof(sc); 2524 } 2525 } 2526 2527 if (status & DC_ISR_BUS_ERR) { 2528 dc_reset(sc); 2529 dc_init(sc); 2530 } 2531 } 2532 2533 /* Re-enable interrupts. */ 2534 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2535 2536 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2537 dc_start(ifp); 2538 2539 return (claimed); 2540 } 2541 2542 /* 2543 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2544 * pointers to the fragment pointers. 2545 */ 2546 int 2547 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 2548 { 2549 struct dc_desc *f = NULL; 2550 int frag, cur, cnt = 0, i; 2551 bus_dmamap_t map; 2552 2553 /* 2554 * Start packing the mbufs in this chain into 2555 * the fragment pointers. Stop when we run out 2556 * of fragments or hit the end of the mbuf chain. 
2557 */ 2558 map = sc->sc_tx_sparemap; 2559 2560 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 2561 m_head, BUS_DMA_NOWAIT) != 0) 2562 return (ENOBUFS); 2563 2564 cur = frag = *txidx; 2565 2566 for (i = 0; i < map->dm_nsegs; i++) { 2567 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 2568 if (*txidx != sc->dc_cdata.dc_tx_prod && 2569 frag == (DC_TX_LIST_CNT - 1)) { 2570 bus_dmamap_unload(sc->sc_dmat, map); 2571 return (ENOBUFS); 2572 } 2573 } 2574 if ((DC_TX_LIST_CNT - 2575 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) { 2576 bus_dmamap_unload(sc->sc_dmat, map); 2577 return (ENOBUFS); 2578 } 2579 2580 f = &sc->dc_ldata->dc_tx_list[frag]; 2581 f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len); 2582 if (cnt == 0) { 2583 f->dc_status = htole32(0); 2584 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 2585 } else 2586 f->dc_status = htole32(DC_TXSTAT_OWN); 2587 f->dc_data = htole32(map->dm_segs[i].ds_addr); 2588 cur = frag; 2589 DC_INC(frag, DC_TX_LIST_CNT); 2590 cnt++; 2591 } 2592 2593 sc->dc_cdata.dc_tx_cnt += cnt; 2594 sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head; 2595 sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map; 2596 sc->dc_cdata.dc_tx_chain[cur].sd_map = map; 2597 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 2598 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 2599 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= 2600 htole32(DC_TXCTL_FINT); 2601 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 2602 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2603 htole32(DC_TXCTL_FINT); 2604 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 2605 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2606 htole32(DC_TXCTL_FINT); 2607 else if ((sc->dc_flags & DC_TX_USE_TX_INTR) && 2608 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd)) 2609 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2610 htole32(DC_TXCTL_FINT); 2611 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2612 BUS_DMASYNC_PREWRITE); 2613 2614 sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN); 2615 2616 
bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2617 offsetof(struct dc_list_data, dc_tx_list[*txidx]), 2618 sizeof(struct dc_desc) * cnt, 2619 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2620 2621 *txidx = frag; 2622 2623 return (0); 2624 } 2625 2626 /* 2627 * Coalesce an mbuf chain into a single mbuf cluster buffer. 2628 * Needed for some really badly behaved chips that just can't 2629 * do scatter/gather correctly. 2630 */ 2631 int 2632 dc_coal(struct dc_softc *sc, struct mbuf **m_head) 2633 { 2634 struct mbuf *m_new, *m; 2635 2636 m = *m_head; 2637 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 2638 if (m_new == NULL) 2639 return (ENOBUFS); 2640 if (m->m_pkthdr.len > MHLEN) { 2641 MCLGET(m_new, M_DONTWAIT); 2642 if (!(m_new->m_flags & M_EXT)) { 2643 m_freem(m_new); 2644 return (ENOBUFS); 2645 } 2646 } 2647 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 2648 m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; 2649 m_freem(m); 2650 *m_head = m_new; 2651 2652 return (0); 2653 } 2654 2655 /* 2656 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2657 * to the mbuf data regions directly in the transmit lists. We also save a 2658 * copy of the pointers since the transmit list fragment pointers are 2659 * physical addresses. 2660 */ 2661 2662 void 2663 dc_start(struct ifnet *ifp) 2664 { 2665 struct dc_softc *sc; 2666 struct mbuf *m_head = NULL; 2667 int idx; 2668 2669 sc = ifp->if_softc; 2670 2671 if (!sc->dc_link && IFQ_LEN(&ifp->if_snd) < 10) 2672 return; 2673 2674 if (ifp->if_flags & IFF_OACTIVE) 2675 return; 2676 2677 idx = sc->dc_cdata.dc_tx_prod; 2678 2679 while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) { 2680 IFQ_POLL(&ifp->if_snd, m_head); 2681 if (m_head == NULL) 2682 break; 2683 2684 if (sc->dc_flags & DC_TX_COALESCE && 2685 (m_head->m_next != NULL || 2686 sc->dc_flags & DC_TX_ALIGN)) { 2687 /* note: dc_coal breaks the poll-and-dequeue rule. 2688 * if dc_coal fails, we lose the packet. 
2689 */ 2690 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2691 if (dc_coal(sc, &m_head)) { 2692 ifp->if_flags |= IFF_OACTIVE; 2693 break; 2694 } 2695 } 2696 2697 if (dc_encap(sc, m_head, &idx)) { 2698 ifp->if_flags |= IFF_OACTIVE; 2699 break; 2700 } 2701 2702 /* now we are committed to transmit the packet */ 2703 if (sc->dc_flags & DC_TX_COALESCE) { 2704 /* if mbuf is coalesced, it is already dequeued */ 2705 } else 2706 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2707 2708 /* 2709 * If there's a BPF listener, bounce a copy of this frame 2710 * to him. 2711 */ 2712 #if NBPFILTER > 0 2713 if (ifp->if_bpf) 2714 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 2715 #endif 2716 if (sc->dc_flags & DC_TX_ONE) { 2717 ifp->if_flags |= IFF_OACTIVE; 2718 break; 2719 } 2720 } 2721 if (idx == sc->dc_cdata.dc_tx_prod) 2722 return; 2723 2724 /* Transmit */ 2725 sc->dc_cdata.dc_tx_prod = idx; 2726 if (!(sc->dc_flags & DC_TX_POLL)) 2727 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2728 2729 /* 2730 * Set a timeout in case the chip goes out to lunch. 2731 */ 2732 ifp->if_timer = 5; 2733 } 2734 2735 void 2736 dc_init(void *xsc) 2737 { 2738 struct dc_softc *sc = xsc; 2739 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2740 struct mii_data *mii; 2741 int s; 2742 2743 s = splnet(); 2744 2745 mii = &sc->sc_mii; 2746 2747 /* 2748 * Cancel pending I/O and free all RX/TX buffers. 2749 */ 2750 dc_stop(sc, 0); 2751 dc_reset(sc); 2752 2753 /* 2754 * Set cache alignment and burst length. 2755 */ 2756 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 2757 CSR_WRITE_4(sc, DC_BUSCTL, 0); 2758 else 2759 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 2760 /* 2761 * Evenly share the bus between receive and transmit process. 
2762 */ 2763 if (DC_IS_INTEL(sc)) 2764 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 2765 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 2766 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 2767 } else { 2768 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 2769 } 2770 if (sc->dc_flags & DC_TX_POLL) 2771 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 2772 switch(sc->dc_cachesize) { 2773 case 32: 2774 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 2775 break; 2776 case 16: 2777 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 2778 break; 2779 case 8: 2780 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 2781 break; 2782 case 0: 2783 default: 2784 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 2785 break; 2786 } 2787 2788 if (sc->dc_flags & DC_TX_STORENFWD) 2789 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2790 else { 2791 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2792 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2793 } else { 2794 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2795 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2796 } 2797 } 2798 2799 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 2800 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 2801 2802 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2803 /* 2804 * The app notes for the 98713 and 98715A say that 2805 * in order to have the chips operate properly, a magic 2806 * number must be written to CSR16. Macronix does not 2807 * document the meaning of these bits so there's no way 2808 * to know exactly what they do. The 98713 has a magic 2809 * number all its own; the rest all use a different one. 
2810 */ 2811 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 2812 if (sc->dc_type == DC_TYPE_98713) 2813 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 2814 else 2815 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 2816 } 2817 2818 if (DC_IS_XIRCOM(sc)) { 2819 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2820 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2821 DELAY(10); 2822 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2823 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2824 DELAY(10); 2825 } 2826 2827 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2828 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 2829 2830 /* Init circular RX list. */ 2831 if (dc_list_rx_init(sc) == ENOBUFS) { 2832 printf("%s: initialization failed: no " 2833 "memory for rx buffers\n", sc->sc_dev.dv_xname); 2834 dc_stop(sc, 0); 2835 splx(s); 2836 return; 2837 } 2838 2839 /* 2840 * Init tx descriptors. 2841 */ 2842 dc_list_tx_init(sc); 2843 2844 /* 2845 * Sync down both lists initialized. 2846 */ 2847 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2848 0, sc->sc_listmap->dm_mapsize, 2849 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2850 2851 /* 2852 * Load the address of the RX list. 2853 */ 2854 CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2855 offsetof(struct dc_list_data, dc_rx_list[0])); 2856 CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2857 offsetof(struct dc_list_data, dc_tx_list[0])); 2858 2859 /* 2860 * Enable interrupts. 2861 */ 2862 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2863 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 2864 2865 /* Enable transmitter. */ 2866 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2867 2868 /* 2869 * If this is an Intel 21143 and we're not using the 2870 * MII port, program the LED control pins so we get 2871 * link and activity indications. 
2872 */ 2873 if (sc->dc_flags & DC_TULIP_LEDS) { 2874 CSR_WRITE_4(sc, DC_WATCHDOG, 2875 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 2876 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 2877 } 2878 2879 /* 2880 * Load the RX/multicast filter. We do this sort of late 2881 * because the filter programming scheme on the 21143 and 2882 * some clones requires DMAing a setup frame via the TX 2883 * engine, and we need the transmitter enabled for that. 2884 */ 2885 dc_setfilt(sc); 2886 2887 /* Enable receiver. */ 2888 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 2889 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 2890 2891 mii_mediachg(mii); 2892 dc_setcfg(sc, sc->dc_if_media); 2893 2894 ifp->if_flags |= IFF_RUNNING; 2895 ifp->if_flags &= ~IFF_OACTIVE; 2896 2897 splx(s); 2898 2899 timeout_set(&sc->dc_tick_tmo, dc_tick, sc); 2900 2901 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 2902 sc->dc_link = 1; 2903 else { 2904 if (sc->dc_flags & DC_21143_NWAY) 2905 timeout_add_msec(&sc->dc_tick_tmo, 100); 2906 else 2907 timeout_add_sec(&sc->dc_tick_tmo, 1); 2908 } 2909 2910 #ifdef SRM_MEDIA 2911 if(sc->dc_srm_media) { 2912 struct ifreq ifr; 2913 2914 ifr.ifr_media = sc->dc_srm_media; 2915 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 2916 sc->dc_srm_media = 0; 2917 } 2918 #endif 2919 } 2920 2921 /* 2922 * Set media options. 2923 */ 2924 int 2925 dc_ifmedia_upd(struct ifnet *ifp) 2926 { 2927 struct dc_softc *sc; 2928 struct mii_data *mii; 2929 struct ifmedia *ifm; 2930 2931 sc = ifp->if_softc; 2932 mii = &sc->sc_mii; 2933 mii_mediachg(mii); 2934 2935 ifm = &mii->mii_media; 2936 2937 if (DC_IS_DAVICOM(sc) && 2938 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 2939 dc_setcfg(sc, ifm->ifm_media); 2940 else 2941 sc->dc_link = 0; 2942 2943 return (0); 2944 } 2945 2946 /* 2947 * Report current media status. 
2948 */ 2949 void 2950 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2951 { 2952 struct dc_softc *sc; 2953 struct mii_data *mii; 2954 struct ifmedia *ifm; 2955 2956 sc = ifp->if_softc; 2957 mii = &sc->sc_mii; 2958 mii_pollstat(mii); 2959 ifm = &mii->mii_media; 2960 if (DC_IS_DAVICOM(sc)) { 2961 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 2962 ifmr->ifm_active = ifm->ifm_media; 2963 ifmr->ifm_status = 0; 2964 return; 2965 } 2966 } 2967 ifmr->ifm_active = mii->mii_media_active; 2968 ifmr->ifm_status = mii->mii_media_status; 2969 } 2970 2971 int 2972 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2973 { 2974 struct dc_softc *sc = ifp->if_softc; 2975 struct ifreq *ifr = (struct ifreq *) data; 2976 struct ifaddr *ifa = (struct ifaddr *)data; 2977 struct mii_data *mii; 2978 int s, error = 0; 2979 2980 s = splnet(); 2981 2982 switch(command) { 2983 case SIOCSIFADDR: 2984 ifp->if_flags |= IFF_UP; 2985 if (!(ifp->if_flags & IFF_RUNNING)) 2986 dc_init(sc); 2987 #ifdef INET 2988 if (ifa->ifa_addr->sa_family == AF_INET) 2989 arp_ifinit(&sc->sc_arpcom, ifa); 2990 #endif 2991 break; 2992 case SIOCSIFFLAGS: 2993 if (ifp->if_flags & IFF_UP) { 2994 if (ifp->if_flags & IFF_RUNNING && 2995 (ifp->if_flags ^ sc->dc_if_flags) & 2996 IFF_PROMISC) { 2997 dc_setfilt(sc); 2998 } else { 2999 if (!(ifp->if_flags & IFF_RUNNING)) { 3000 sc->dc_txthresh = 0; 3001 dc_init(sc); 3002 } 3003 } 3004 } else { 3005 if (ifp->if_flags & IFF_RUNNING) 3006 dc_stop(sc, 0); 3007 } 3008 sc->dc_if_flags = ifp->if_flags; 3009 break; 3010 case SIOCGIFMEDIA: 3011 case SIOCSIFMEDIA: 3012 mii = &sc->sc_mii; 3013 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3014 #ifdef SRM_MEDIA 3015 if (sc->dc_srm_media) 3016 sc->dc_srm_media = 0; 3017 #endif 3018 break; 3019 default: 3020 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 3021 } 3022 3023 if (error == ENETRESET) { 3024 if (ifp->if_flags & IFF_RUNNING) 3025 dc_setfilt(sc); 3026 error = 0; 3027 } 3028 3029 splx(s); 
3030 return (error); 3031 } 3032 3033 void 3034 dc_watchdog(struct ifnet *ifp) 3035 { 3036 struct dc_softc *sc; 3037 3038 sc = ifp->if_softc; 3039 3040 ifp->if_oerrors++; 3041 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 3042 3043 dc_stop(sc, 0); 3044 dc_reset(sc); 3045 dc_init(sc); 3046 3047 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 3048 dc_start(ifp); 3049 } 3050 3051 /* 3052 * Stop the adapter and free any mbufs allocated to the 3053 * RX and TX lists. 3054 */ 3055 void 3056 dc_stop(struct dc_softc *sc, int softonly) 3057 { 3058 struct ifnet *ifp; 3059 u_int32_t isr; 3060 int i; 3061 3062 ifp = &sc->sc_arpcom.ac_if; 3063 ifp->if_timer = 0; 3064 3065 timeout_del(&sc->dc_tick_tmo); 3066 3067 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3068 3069 if (!softonly) { 3070 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3071 3072 for (i = 0; i < DC_TIMEOUT; i++) { 3073 isr = CSR_READ_4(sc, DC_ISR); 3074 if ((isr & DC_ISR_TX_IDLE || 3075 (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) && 3076 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) 3077 break; 3078 DELAY(10); 3079 } 3080 3081 if (i == DC_TIMEOUT) { 3082 if (!((isr & DC_ISR_TX_IDLE) || 3083 (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) && 3084 !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc)) 3085 printf("%s: failed to force tx to idle state\n", 3086 sc->sc_dev.dv_xname); 3087 if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) && 3088 !DC_HAS_BROKEN_RXSTATE(sc)) 3089 printf("%s: failed to force rx to idle state\n", 3090 sc->sc_dev.dv_xname); 3091 } 3092 3093 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3094 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3095 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3096 sc->dc_link = 0; 3097 } 3098 3099 /* 3100 * Free data in the RX lists. 
3101 */ 3102 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3103 if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) { 3104 bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map; 3105 3106 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3107 BUS_DMASYNC_POSTREAD); 3108 bus_dmamap_unload(sc->sc_dmat, map); 3109 } 3110 if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) { 3111 m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf); 3112 sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL; 3113 } 3114 } 3115 bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list)); 3116 3117 /* 3118 * Free the TX list buffers. 3119 */ 3120 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3121 if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) { 3122 bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map; 3123 3124 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3125 BUS_DMASYNC_POSTWRITE); 3126 bus_dmamap_unload(sc->sc_dmat, map); 3127 } 3128 if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) { 3129 if (sc->dc_ldata->dc_tx_list[i].dc_ctl & 3130 htole32(DC_TXCTL_SETUP)) { 3131 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL; 3132 continue; 3133 } 3134 m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf); 3135 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL; 3136 } 3137 } 3138 bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list)); 3139 3140 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 3141 0, sc->sc_listmap->dm_mapsize, 3142 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3143 } 3144 3145 int 3146 dc_activate(struct device *self, int act) 3147 { 3148 struct dc_softc *sc = (struct dc_softc *)self; 3149 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 3150 int rv = 0; 3151 3152 switch (act) { 3153 case DVACT_QUIESCE: 3154 rv = config_activate_children(self, act); 3155 break; 3156 case DVACT_SUSPEND: 3157 if (ifp->if_flags & IFF_RUNNING) 3158 dc_stop(sc, 0); 3159 rv = config_activate_children(self, act); 3160 break; 3161 case DVACT_RESUME: 3162 rv = config_activate_children(self, act); 3163 if (ifp->if_flags & IFF_UP) 
3164 dc_init(sc); 3165 break; 3166 } 3167 return (rv); 3168 } 3169 3170 int 3171 dc_detach(struct dc_softc *sc) 3172 { 3173 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 3174 int i; 3175 3176 dc_stop(sc, 1); 3177 3178 if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) 3179 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3180 3181 if (sc->dc_srom) 3182 free(sc->dc_srom, M_DEVBUF); 3183 3184 for (i = 0; i < DC_RX_LIST_CNT; i++) 3185 bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map); 3186 if (sc->sc_rx_sparemap) 3187 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap); 3188 for (i = 0; i < DC_TX_LIST_CNT; i++) 3189 bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map); 3190 if (sc->sc_tx_sparemap) 3191 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap); 3192 3193 /// XXX bus_dmamap_sync 3194 bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap); 3195 bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg); 3196 bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap); 3197 bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg); 3198 3199 ether_ifdetach(ifp); 3200 if_detach(ifp); 3201 return (0); 3202 } 3203 3204 struct cfdriver dc_cd = { 3205 0, "dc", DV_IFNET 3206 }; 3207