1 /* $OpenBSD: dc.c,v 1.103 2008/09/12 05:44:52 brad Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $ 35 */ 36 37 /* 38 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 39 * series chips and several workalikes including the following: 40 * 41 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 42 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 43 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 44 * ASIX Electronics AX88140A (www.asix.com.tw) 45 * ASIX Electronics AX88141 (www.asix.com.tw) 46 * ADMtek AL981 (www.admtek.com.tw) 47 * ADMtek AN983 (www.admtek.com.tw) 48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 49 * Accton EN1217, EN2242 (www.accton.com) 50 * Xircom X3201 (www.xircom.com) 51 * 52 * Datasheets for the 21143 are available at developer.intel.com. 53 * Datasheets for the clone parts can be found at their respective sites. 54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 55 * The PNIC II is essentially a Macronix 98715A chip; the only difference 56 * worth noting is that its multicast hash table is only 128 bits wide 57 * instead of 512. 58 * 59 * Written by Bill Paul <wpaul@ee.columbia.edu> 60 * Electrical Engineering Department 61 * Columbia University, New York City 62 */ 63 64 /* 65 * The Intel 21143 is the successor to the DEC 21140. It is basically 66 * the same as the 21140 but with a few new features. The 21143 supports 67 * three kinds of media attachments: 68 * 69 * o MII port, for 10Mbps and 100Mbps support and NWAY 70 * autonegotiation provided by an external PHY. 71 * o SYM port, for symbol mode 100Mbps support. 72 * o 10baseT port. 73 * o AUI/BNC port. 74 * 75 * The 100Mbps SYM port and 10baseT port can be used together in 76 * combination with the internal NWAY support to create a 10/100 77 * autosensing configuration. 78 * 79 * Note that not all tulip workalikes are handled in this driver: we only 80 * deal with those which are relatively well behaved. 
The Winbond is 81 * handled separately due to its different register offsets and the 82 * special handling needed for its various bugs. The PNIC is handled 83 * here, but I'm not thrilled about it. 84 * 85 * All of the workalike chips use some form of MII transceiver support 86 * with the exception of the Macronix chips, which also have a SYM port. 87 * The ASIX AX88140A is also documented to have a SYM port, but all 88 * the cards I've seen use an MII transceiver, probably because the 89 * AX88140A doesn't support internal NWAY. 90 */ 91 92 #include "bpfilter.h" 93 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/mbuf.h> 97 #include <sys/protosw.h> 98 #include <sys/socket.h> 99 #include <sys/ioctl.h> 100 #include <sys/errno.h> 101 #include <sys/malloc.h> 102 #include <sys/kernel.h> 103 #include <sys/device.h> 104 #include <sys/timeout.h> 105 106 #include <net/if.h> 107 #include <net/if_dl.h> 108 #include <net/if_types.h> 109 110 #ifdef INET 111 #include <netinet/in.h> 112 #include <netinet/in_systm.h> 113 #include <netinet/in_var.h> 114 #include <netinet/ip.h> 115 #include <netinet/if_ether.h> 116 #endif 117 118 #include <net/if_media.h> 119 120 #if NBPFILTER > 0 121 #include <net/bpf.h> 122 #endif 123 124 #include <dev/mii/mii.h> 125 #include <dev/mii/miivar.h> 126 127 #include <machine/bus.h> 128 #include <dev/pci/pcidevs.h> 129 130 #include <dev/ic/dcreg.h> 131 132 int dc_intr(void *); 133 void dc_shutdown(void *); 134 void dc_power(int, void *); 135 struct dc_type *dc_devtype(void *); 136 int dc_newbuf(struct dc_softc *, int, struct mbuf *); 137 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *); 138 int dc_coal(struct dc_softc *, struct mbuf **); 139 140 void dc_pnic_rx_bug_war(struct dc_softc *, int); 141 int dc_rx_resync(struct dc_softc *); 142 void dc_rxeof(struct dc_softc *); 143 void dc_txeof(struct dc_softc *); 144 void dc_tick(void *); 145 void dc_tx_underrun(struct dc_softc *); 146 void dc_start(struct ifnet *); 147 int 
dc_ioctl(struct ifnet *, u_long, caddr_t); 148 void dc_init(void *); 149 void dc_stop(struct dc_softc *); 150 void dc_watchdog(struct ifnet *); 151 int dc_ifmedia_upd(struct ifnet *); 152 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); 153 154 void dc_delay(struct dc_softc *); 155 void dc_eeprom_width(struct dc_softc *); 156 void dc_eeprom_idle(struct dc_softc *); 157 void dc_eeprom_putbyte(struct dc_softc *, int); 158 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *); 159 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *); 160 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *); 161 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); 162 163 void dc_mii_writebit(struct dc_softc *, int); 164 int dc_mii_readbit(struct dc_softc *); 165 void dc_mii_sync(struct dc_softc *); 166 void dc_mii_send(struct dc_softc *, u_int32_t, int); 167 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *); 168 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *); 169 int dc_miibus_readreg(struct device *, int, int); 170 void dc_miibus_writereg(struct device *, int, int, int); 171 void dc_miibus_statchg(struct device *); 172 173 void dc_setcfg(struct dc_softc *, int); 174 u_int32_t dc_crc_le(struct dc_softc *, caddr_t); 175 u_int32_t dc_crc_be(caddr_t); 176 void dc_setfilt_21143(struct dc_softc *); 177 void dc_setfilt_asix(struct dc_softc *); 178 void dc_setfilt_admtek(struct dc_softc *); 179 void dc_setfilt_xircom(struct dc_softc *); 180 181 void dc_setfilt(struct dc_softc *); 182 183 void dc_reset(struct dc_softc *); 184 int dc_list_rx_init(struct dc_softc *); 185 int dc_list_tx_init(struct dc_softc *); 186 187 void dc_read_srom(struct dc_softc *, int); 188 void dc_parse_21143_srom(struct dc_softc *); 189 void dc_decode_leaf_sia(struct dc_softc *, 190 struct dc_eblock_sia *); 191 void dc_decode_leaf_mii(struct dc_softc *, 192 struct dc_eblock_mii *); 193 void dc_decode_leaf_sym(struct dc_softc *, 194 struct 
dc_eblock_sym *);
void dc_apply_fixup(struct dc_softc *, int);

/* Read-modify-write helpers for 32-bit CSRs. */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthand for toggling bits in the serial I/O (EEPROM/MII) register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))

/*
 * Settling delay between SIO register accesses: ~10 dummy reads of
 * DC_BUSCTL.  (The (300 / 33) + 1 count presumably targets ~300ns of
 * PCI read latency at ~33ns per read — TODO confirm against the 21143
 * serial-ROM timing requirements.)
 */
void
dc_delay(struct dc_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, DC_BUSCTL);
}

/*
 * Probe the address width of the serial EEPROM and record it in
 * sc->dc_romwidth.  A READ opcode (binary 110) for address 0 is clocked
 * out, then zero address bits are shifted in until the EEPROM pulls its
 * data-out line low; the number of bits shifted is the device's address
 * width.  Out-of-range results (under 4 or over 12 bits) fall back to a
 * width of 6.
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode, MSB first (6 == binary 110). */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Shift in zero address bits until the EEPROM acknowledges by
	 * driving DATAOUT low; 'i' then holds the address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Implausible widths fall back to the common 6-bit case. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}

/*
 * Drive the EEPROM interface to its idle state: select the EEPROM,
 * strobe the clock 25 times with chip-select asserted, then deassert
 * everything and clear the SIO register.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 * The opcode is clocked out MSB first, followed by sc->dc_romwidth
 * address bits (as probed by dc_eeprom_width() above).
 */
void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	d = DC_EECMD_READ >> 6;

	/* Clock out the 3-bit read opcode, MSB first. */
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	/* Post the read command; the chip does the serial work itself. */
	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	/*
	 * Poll for completion.  Note: if the BUSY bit never clears
	 * within DC_TIMEOUT iterations, *dest is left unmodified.
	 */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read
 * the EEPROM, too: two byte-wide reads through the ROM register,
 * low byte first.
 */
void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/* Word address -> byte address; fetch low then high byte. */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * Standard bit-banged path used by everything except the PNIC
 * and Xircom variants above.
 */
void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM into 'dest', dispatching
 * to the chip-specific getword routine.  'swap' selects big- vs.
 * little-endian interpretation of each 16-bit word.
 */
void
dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
    int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else if (DC_IS_XIRCOM(sc))
			dc_eeprom_getword_xircom(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = betoh16(word);
		else
			*ptr = letoh16(word);
	}
}

/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */
/*
 * Write a bit to the MII bus.
 */
void
dc_mii_writebit(struct dc_softc *sc, int bit)
{
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	/* Strobe the MII clock to latch the data bit. */
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
}

/*
 * Read a bit from the MII bus.
 */
int
dc_mii_readbit(struct dc_softc *sc)
{
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return (1);
	return (0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
dc_mii_sync(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);
}

/*
 * Clock a series of bits through the MII.
 */
void
dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	/* MSB first: start at bit (cnt - 1) and shift down. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read an PHY register through the MII.
 * Returns 0 on success, 1 if the PHY failed to acknowledge; on
 * success the result is in frame->mii_data.
 */
int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	/* ack is known 0 here; the inner test is belt-and-braces. */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Two idle bits to terminate the frame. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 * Always returns 0 (writes are not acknowledged).
 */
int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);
	return (0);
}

/*
 * MII bus read entry point.  Dispatches between the bit-bang path
 * and the chips that expose PHY registers directly (PNIC, Comet),
 * and fakes a PHY for non-MII media configurations.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probs of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	if (DC_IS_PNIC(sc)) {
		/* PNIC has its own MII access register; poll for !busy. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones means no PHY responded. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		/* Comet maps PHY registers onto chip CSRs directly. */
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	/* The 98713 needs PORTSEL cleared while bit-banging the MII. */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}

/*
 * MII bus write entry point; mirrors dc_miibus_readreg() above.
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero((char *)&frame, sizeof(frame));

	/* Only the fixed internal PHY address is valid on these chips. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	if (DC_IS_PNIC(sc)) {
		/* Note: write opcode places reg at bit 10, not 18. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}

/*
 * Media status change callback: reprogram the chip configuration for
 * the newly negotiated media and remember it in dc_if_media.  Davicom
 * HomePNA gets the configured (not negotiated) media.
 */
void
dc_miibus_statchg(struct device *self)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct mii_data *mii;
	struct ifmedia *ifm;

	if (DC_IS_ADMTEK(sc))
		return;

	mii = &sc->sc_mii;
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}
}

#define DC_BITS_512	9
#define DC_BITS_128 7 843 #define DC_BITS_64 6 844 845 u_int32_t 846 dc_crc_le(struct dc_softc *sc, caddr_t addr) 847 { 848 u_int32_t crc; 849 850 /* Compute CRC for the address value. */ 851 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 852 853 /* 854 * The hash table on the PNIC II and the MX98715AEC-C/D/E 855 * chips is only 128 bits wide. 856 */ 857 if (sc->dc_flags & DC_128BIT_HASH) 858 return (crc & ((1 << DC_BITS_128) - 1)); 859 860 /* The hash table on the MX98715BEC is only 64 bits wide. */ 861 if (sc->dc_flags & DC_64BIT_HASH) 862 return (crc & ((1 << DC_BITS_64) - 1)); 863 864 /* Xircom's hash filtering table is different (read: weird) */ 865 /* Xircom uses the LEAST significant bits */ 866 if (DC_IS_XIRCOM(sc)) { 867 if ((crc & 0x180) == 0x180) 868 return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4); 869 else 870 return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4); 871 } 872 873 return (crc & ((1 << DC_BITS_512) - 1)); 874 } 875 876 /* 877 * Calculate CRC of a multicast group address, return the lower 6 bits. 878 */ 879 #define dc_crc_be(addr) ((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \ 880 & 0x0000003F) 881 882 /* 883 * 21143-style RX filter setup routine. Filter programming is done by 884 * downloading a special setup frame into the TX engine. 21143, Macronix, 885 * PNIC, PNIC II and Davicom chips are programmed this way. 886 * 887 * We always program the chip using 'hash perfect' mode, i.e. one perfect 888 * address (our node address) and a 512-bit hash filter for multicast 889 * frames. We also sneak the broadcast address into the hash filter since 890 * we need that too. 
891 */ 892 void 893 dc_setfilt_21143(struct dc_softc *sc) 894 { 895 struct dc_desc *sframe; 896 u_int32_t h, *sp; 897 struct arpcom *ac = &sc->sc_arpcom; 898 struct ether_multi *enm; 899 struct ether_multistep step; 900 struct ifnet *ifp; 901 int i; 902 903 ifp = &sc->sc_arpcom.ac_if; 904 905 i = sc->dc_cdata.dc_tx_prod; 906 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 907 sc->dc_cdata.dc_tx_cnt++; 908 sframe = &sc->dc_ldata->dc_tx_list[i]; 909 sp = &sc->dc_ldata->dc_sbuf[0]; 910 bzero((char *)sp, DC_SFRAME_LEN); 911 912 sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr + 913 offsetof(struct dc_list_data, dc_sbuf)); 914 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 915 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 916 917 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = 918 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0]; 919 920 /* If we want promiscuous mode, set the allframes bit. */ 921 if (ifp->if_flags & IFF_PROMISC) 922 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 923 else 924 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 925 926 allmulti: 927 if (ifp->if_flags & IFF_ALLMULTI) 928 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 929 else { 930 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 931 932 ETHER_FIRST_MULTI(step, ac, enm); 933 while (enm != NULL) { 934 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 935 ETHER_ADDR_LEN)) { 936 ifp->if_flags |= IFF_ALLMULTI; 937 goto allmulti; 938 } 939 940 h = dc_crc_le(sc, enm->enm_addrlo); 941 sp[h >> 4] |= htole32(1 << (h & 0xF)); 942 ETHER_NEXT_MULTI(step, enm); 943 } 944 } 945 946 if (ifp->if_flags & IFF_BROADCAST) { 947 h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); 948 sp[h >> 4] |= htole32(1 << (h & 0xF)); 949 } 950 951 /* Set our MAC address */ 952 sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0); 953 sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1); 954 sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2); 955 956 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 957 offsetof(struct dc_list_data, 
dc_sbuf[0]), 958 sizeof(struct dc_list_data) - 959 offsetof(struct dc_list_data, dc_sbuf[0]), 960 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 961 962 sframe->dc_status = htole32(DC_TXSTAT_OWN); 963 964 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 965 offsetof(struct dc_list_data, dc_tx_list[i]), 966 sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 967 968 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 969 970 /* 971 * The PNIC takes an exceedingly long time to process its 972 * setup frame; wait 10ms after posting the setup frame 973 * before proceeding, just so it has time to swallow its 974 * medicine. 975 */ 976 DELAY(10000); 977 978 ifp->if_timer = 5; 979 } 980 981 void 982 dc_setfilt_admtek(struct dc_softc *sc) 983 { 984 struct ifnet *ifp; 985 struct arpcom *ac = &sc->sc_arpcom; 986 struct ether_multi *enm; 987 struct ether_multistep step; 988 int h = 0; 989 u_int32_t hashes[2] = { 0, 0 }; 990 991 ifp = &sc->sc_arpcom.ac_if; 992 993 /* Init our MAC address */ 994 CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 | 995 ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]); 996 CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]); 997 998 /* If we want promiscuous mode, set the allframes bit. */ 999 if (ifp->if_flags & IFF_PROMISC) 1000 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1001 else 1002 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1003 1004 allmulti: 1005 if (ifp->if_flags & IFF_ALLMULTI) 1006 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1007 else 1008 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1009 1010 /* first, zot all the existing hash bits */ 1011 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1012 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1013 1014 /* 1015 * If we're already in promisc or allmulti mode, we 1016 * don't have to bother programming the multicast filter. 
1017 */ 1018 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1019 return; 1020 1021 /* now program new ones */ 1022 ETHER_FIRST_MULTI(step, ac, enm); 1023 while (enm != NULL) { 1024 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1025 ifp->if_flags |= IFF_ALLMULTI; 1026 goto allmulti; 1027 } 1028 1029 if (DC_IS_CENTAUR(sc)) 1030 h = dc_crc_le(sc, enm->enm_addrlo); 1031 else 1032 h = dc_crc_be(enm->enm_addrlo); 1033 if (h < 32) 1034 hashes[0] |= (1 << h); 1035 else 1036 hashes[1] |= (1 << (h - 32)); 1037 ETHER_NEXT_MULTI(step, enm); 1038 } 1039 1040 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1041 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1042 } 1043 1044 void 1045 dc_setfilt_asix(struct dc_softc *sc) 1046 { 1047 struct ifnet *ifp; 1048 struct arpcom *ac = &sc->sc_arpcom; 1049 struct ether_multi *enm; 1050 struct ether_multistep step; 1051 int h = 0; 1052 u_int32_t hashes[2] = { 0, 0 }; 1053 1054 ifp = &sc->sc_arpcom.ac_if; 1055 1056 /* Init our MAC address */ 1057 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1058 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1059 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0])); 1060 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1061 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1062 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4])); 1063 1064 /* If we want promiscuous mode, set the allframes bit. */ 1065 if (ifp->if_flags & IFF_PROMISC) 1066 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1067 else 1068 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1069 1070 if (ifp->if_flags & IFF_ALLMULTI) 1071 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1072 else 1073 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1074 1075 /* 1076 * The ASIX chip has a special bit to enable reception 1077 * of broadcast frames. 
1078 */ 1079 if (ifp->if_flags & IFF_BROADCAST) 1080 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1081 else 1082 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1083 1084 /* first, zot all the existing hash bits */ 1085 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1086 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1087 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1088 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1089 1090 /* 1091 * If we're already in promisc or allmulti mode, we 1092 * don't have to bother programming the multicast filter. 1093 */ 1094 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1095 return; 1096 1097 /* now program new ones */ 1098 ETHER_FIRST_MULTI(step, ac, enm); 1099 while (enm != NULL) { 1100 h = dc_crc_be(enm->enm_addrlo); 1101 if (h < 32) 1102 hashes[0] |= (1 << h); 1103 else 1104 hashes[1] |= (1 << (h - 32)); 1105 ETHER_NEXT_MULTI(step, enm); 1106 } 1107 1108 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1109 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); 1110 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1111 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); 1112 } 1113 1114 void 1115 dc_setfilt_xircom(struct dc_softc *sc) 1116 { 1117 struct dc_desc *sframe; 1118 struct arpcom *ac = &sc->sc_arpcom; 1119 struct ether_multi *enm; 1120 struct ether_multistep step; 1121 u_int32_t h, *sp; 1122 struct ifnet *ifp; 1123 int i; 1124 1125 ifp = &sc->sc_arpcom.ac_if; 1126 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); 1127 1128 i = sc->dc_cdata.dc_tx_prod; 1129 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1130 sc->dc_cdata.dc_tx_cnt++; 1131 sframe = &sc->dc_ldata->dc_tx_list[i]; 1132 sp = &sc->dc_ldata->dc_sbuf[0]; 1133 bzero((char *)sp, DC_SFRAME_LEN); 1134 1135 sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr + 1136 offsetof(struct dc_list_data, dc_sbuf)); 1137 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1138 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1139 1140 
sc->dc_cdata.dc_tx_chain[i].sd_mbuf = 1141 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0]; 1142 1143 /* If we want promiscuous mode, set the allframes bit. */ 1144 if (ifp->if_flags & IFF_PROMISC) 1145 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1146 else 1147 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1148 1149 if (ifp->if_flags & IFF_ALLMULTI) 1150 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1151 else 1152 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1153 1154 /* now program new ones */ 1155 ETHER_FIRST_MULTI(step, ac, enm); 1156 while (enm != NULL) { 1157 h = dc_crc_le(sc, enm->enm_addrlo); 1158 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1159 ETHER_NEXT_MULTI(step, enm); 1160 } 1161 1162 if (ifp->if_flags & IFF_BROADCAST) { 1163 h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); 1164 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1165 } 1166 1167 /* Set our MAC address */ 1168 sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0); 1169 sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1); 1170 sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2); 1171 1172 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 1173 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 1174 ifp->if_flags |= IFF_RUNNING; 1175 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1176 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1177 1178 /* 1179 * wait some time... 1180 */ 1181 DELAY(1000); 1182 1183 ifp->if_timer = 5; 1184 } 1185 1186 void 1187 dc_setfilt(struct dc_softc *sc) 1188 { 1189 if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || 1190 DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) 1191 dc_setfilt_21143(sc); 1192 1193 if (DC_IS_ASIX(sc)) 1194 dc_setfilt_asix(sc); 1195 1196 if (DC_IS_ADMTEK(sc)) 1197 dc_setfilt_admtek(sc); 1198 1199 if (DC_IS_XIRCOM(sc)) 1200 dc_setfilt_xircom(sc); 1201 } 1202 1203 /* 1204 * In order to fiddle with the 1205 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 1206 * first have to put the transmit and/or receive logic in the idle state. 
1207 */ 1208 void 1209 dc_setcfg(struct dc_softc *sc, int media) 1210 { 1211 int i, restart = 0; 1212 u_int32_t isr; 1213 1214 if (IFM_SUBTYPE(media) == IFM_NONE) 1215 return; 1216 1217 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) { 1218 restart = 1; 1219 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); 1220 1221 for (i = 0; i < DC_TIMEOUT; i++) { 1222 isr = CSR_READ_4(sc, DC_ISR); 1223 if (isr & DC_ISR_TX_IDLE && 1224 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1225 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1226 break; 1227 DELAY(10); 1228 } 1229 1230 if (i == DC_TIMEOUT) { 1231 if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc)) 1232 printf("%s: failed to force tx to idle state\n", 1233 sc->sc_dev.dv_xname); 1234 if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1235 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && 1236 !DC_HAS_BROKEN_RXSTATE(sc)) 1237 printf("%s: failed to force rx to idle state\n", 1238 sc->sc_dev.dv_xname); 1239 } 1240 } 1241 1242 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1243 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1244 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1245 if (sc->dc_pmode == DC_PMODE_MII) { 1246 int watchdogreg; 1247 1248 if (DC_IS_INTEL(sc)) { 1249 /* there's a write enable bit here that reads as 1 */ 1250 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1251 watchdogreg &= ~DC_WDOG_CTLWREN; 1252 watchdogreg |= DC_WDOG_JABBERDIS; 1253 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1254 } else { 1255 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1256 } 1257 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1258 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1259 if (sc->dc_type == DC_TYPE_98713) 1260 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1261 DC_NETCFG_SCRAMBLER)); 1262 if (!DC_IS_DAVICOM(sc)) 1263 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1264 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1265 if (DC_IS_INTEL(sc)) 1266 dc_apply_fixup(sc, IFM_AUTO); 1267 } else { 1268 if (DC_IS_PNIC(sc)) { 1269 
DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1270 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1271 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1272 } 1273 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1274 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1275 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1276 if (DC_IS_INTEL(sc)) 1277 dc_apply_fixup(sc, 1278 (media & IFM_GMASK) == IFM_FDX ? 1279 IFM_100_TX|IFM_FDX : IFM_100_TX); 1280 } 1281 } 1282 1283 if (IFM_SUBTYPE(media) == IFM_10_T) { 1284 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1285 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1286 if (sc->dc_pmode == DC_PMODE_MII) { 1287 int watchdogreg; 1288 1289 if (DC_IS_INTEL(sc)) { 1290 /* there's a write enable bit here that reads as 1 */ 1291 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1292 watchdogreg &= ~DC_WDOG_CTLWREN; 1293 watchdogreg |= DC_WDOG_JABBERDIS; 1294 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1295 } else { 1296 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1297 } 1298 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1299 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1300 if (sc->dc_type == DC_TYPE_98713) 1301 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1302 if (!DC_IS_DAVICOM(sc)) 1303 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1304 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1305 if (DC_IS_INTEL(sc)) 1306 dc_apply_fixup(sc, IFM_AUTO); 1307 } else { 1308 if (DC_IS_PNIC(sc)) { 1309 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1310 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1311 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1312 } 1313 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1314 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1315 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1316 if (DC_IS_INTEL(sc)) { 1317 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1318 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1319 if ((media & IFM_GMASK) == IFM_FDX) 1320 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1321 else 1322 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1323 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 
1324 DC_CLRBIT(sc, DC_10BTCTRL, 1325 DC_TCTL_AUTONEGENBL); 1326 dc_apply_fixup(sc, 1327 (media & IFM_GMASK) == IFM_FDX ? 1328 IFM_10_T|IFM_FDX : IFM_10_T); 1329 DELAY(20000); 1330 } 1331 } 1332 } 1333 1334 /* 1335 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1336 * PHY and we want HomePNA mode, set the portsel bit to turn 1337 * on the external MII port. 1338 */ 1339 if (DC_IS_DAVICOM(sc)) { 1340 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1341 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1342 sc->dc_link = 1; 1343 } else { 1344 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1345 } 1346 } 1347 1348 if ((media & IFM_GMASK) == IFM_FDX) { 1349 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1350 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1351 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1352 } else { 1353 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1354 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1355 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1356 } 1357 1358 if (restart) 1359 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON); 1360 } 1361 1362 void 1363 dc_reset(struct dc_softc *sc) 1364 { 1365 int i; 1366 1367 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1368 1369 for (i = 0; i < DC_TIMEOUT; i++) { 1370 DELAY(10); 1371 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1372 break; 1373 } 1374 1375 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) || 1376 DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) { 1377 DELAY(10000); 1378 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1379 i = 0; 1380 } 1381 1382 if (i == DC_TIMEOUT) 1383 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 1384 1385 /* Wait a little while for the chip to get its brains in order. */ 1386 DELAY(1000); 1387 1388 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1389 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1390 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1391 1392 /* 1393 * Bring the SIA out of reset. 
In some cases, it looks 1394 * like failing to unreset the SIA soon enough gets it 1395 * into a state where it will never come out of reset 1396 * until we reset the whole chip again. 1397 */ 1398 if (DC_IS_INTEL(sc)) { 1399 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1400 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1401 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1402 } 1403 1404 if (sc->dc_type == DC_TYPE_21145) 1405 dc_setcfg(sc, IFM_10_T); 1406 } 1407 1408 void 1409 dc_apply_fixup(struct dc_softc *sc, int media) 1410 { 1411 struct dc_mediainfo *m; 1412 u_int8_t *p; 1413 int i; 1414 u_int32_t reg; 1415 1416 m = sc->dc_mi; 1417 1418 while (m != NULL) { 1419 if (m->dc_media == media) 1420 break; 1421 m = m->dc_next; 1422 } 1423 1424 if (m == NULL) 1425 return; 1426 1427 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1428 reg = (p[0] | (p[1] << 8)) << 16; 1429 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1430 } 1431 1432 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1433 reg = (p[0] | (p[1] << 8)) << 16; 1434 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1435 } 1436 } 1437 1438 void 1439 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) 1440 { 1441 struct dc_mediainfo *m; 1442 1443 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 1444 if (m == NULL) 1445 return; 1446 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) { 1447 case DC_SIA_CODE_10BT: 1448 m->dc_media = IFM_10_T; 1449 break; 1450 case DC_SIA_CODE_10BT_FDX: 1451 m->dc_media = IFM_10_T|IFM_FDX; 1452 break; 1453 case DC_SIA_CODE_10B2: 1454 m->dc_media = IFM_10_2; 1455 break; 1456 case DC_SIA_CODE_10B5: 1457 m->dc_media = IFM_10_5; 1458 break; 1459 default: 1460 break; 1461 } 1462 1463 /* 1464 * We need to ignore CSR13, CSR14, CSR15 for SIA mode. 1465 * Things apparently already work for cards that do 1466 * supply Media Specific Data. 
1467 */ 1468 if (l->dc_sia_code & DC_SIA_CODE_EXT) { 1469 m->dc_gp_len = 2; 1470 m->dc_gp_ptr = 1471 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl; 1472 } else { 1473 m->dc_gp_len = 2; 1474 m->dc_gp_ptr = 1475 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl; 1476 } 1477 1478 m->dc_next = sc->dc_mi; 1479 sc->dc_mi = m; 1480 1481 sc->dc_pmode = DC_PMODE_SIA; 1482 } 1483 1484 void 1485 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) 1486 { 1487 struct dc_mediainfo *m; 1488 1489 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 1490 if (m == NULL) 1491 return; 1492 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1493 m->dc_media = IFM_100_TX; 1494 1495 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1496 m->dc_media = IFM_100_TX|IFM_FDX; 1497 1498 m->dc_gp_len = 2; 1499 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1500 1501 m->dc_next = sc->dc_mi; 1502 sc->dc_mi = m; 1503 1504 sc->dc_pmode = DC_PMODE_SYM; 1505 } 1506 1507 void 1508 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) 1509 { 1510 u_int8_t *p; 1511 struct dc_mediainfo *m; 1512 1513 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 1514 if (m == NULL) 1515 return; 1516 /* We abuse IFM_AUTO to represent MII. 
*/ 1517 m->dc_media = IFM_AUTO; 1518 m->dc_gp_len = l->dc_gpr_len; 1519 1520 p = (u_int8_t *)l; 1521 p += sizeof(struct dc_eblock_mii); 1522 m->dc_gp_ptr = p; 1523 p += 2 * l->dc_gpr_len; 1524 m->dc_reset_len = *p; 1525 p++; 1526 m->dc_reset_ptr = p; 1527 1528 m->dc_next = sc->dc_mi; 1529 sc->dc_mi = m; 1530 } 1531 1532 void 1533 dc_read_srom(struct dc_softc *sc, int bits) 1534 { 1535 int size; 1536 1537 size = 2 << bits; 1538 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); 1539 if (sc->dc_srom == NULL) 1540 return; 1541 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1542 } 1543 1544 void 1545 dc_parse_21143_srom(struct dc_softc *sc) 1546 { 1547 struct dc_leaf_hdr *lhdr; 1548 struct dc_eblock_hdr *hdr; 1549 int have_mii, i, loff; 1550 char *ptr; 1551 1552 have_mii = 0; 1553 loff = sc->dc_srom[27]; 1554 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1555 1556 ptr = (char *)lhdr; 1557 ptr += sizeof(struct dc_leaf_hdr) - 1; 1558 /* 1559 * Look if we got a MII media block. 1560 */ 1561 for (i = 0; i < lhdr->dc_mcnt; i++) { 1562 hdr = (struct dc_eblock_hdr *)ptr; 1563 if (hdr->dc_type == DC_EBLOCK_MII) 1564 have_mii++; 1565 1566 ptr += (hdr->dc_len & 0x7F); 1567 ptr++; 1568 } 1569 1570 /* 1571 * Do the same thing again. Only use SIA and SYM media 1572 * blocks if no MII media block is available. 1573 */ 1574 ptr = (char *)lhdr; 1575 ptr += sizeof(struct dc_leaf_hdr) - 1; 1576 for (i = 0; i < lhdr->dc_mcnt; i++) { 1577 hdr = (struct dc_eblock_hdr *)ptr; 1578 switch(hdr->dc_type) { 1579 case DC_EBLOCK_MII: 1580 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1581 break; 1582 case DC_EBLOCK_SIA: 1583 if (! have_mii) 1584 dc_decode_leaf_sia(sc, 1585 (struct dc_eblock_sia *)hdr); 1586 break; 1587 case DC_EBLOCK_SYM: 1588 if (! have_mii) 1589 dc_decode_leaf_sym(sc, 1590 (struct dc_eblock_sym *)hdr); 1591 break; 1592 default: 1593 /* Don't care. Yet. 
*/ 1594 break; 1595 } 1596 ptr += (hdr->dc_len & 0x7F); 1597 ptr++; 1598 } 1599 } 1600 1601 /* 1602 * Attach the interface. Allocate softc structures, do ifmedia 1603 * setup and ethernet/BPF attach. 1604 */ 1605 void 1606 dc_attach(struct dc_softc *sc) 1607 { 1608 struct ifnet *ifp; 1609 int mac_offset, tmp, i; 1610 u_int32_t reg; 1611 1612 /* 1613 * Get station address from the EEPROM. 1614 */ 1615 if (sc->sc_hasmac) 1616 goto hasmac; 1617 1618 switch(sc->dc_type) { 1619 case DC_TYPE_98713: 1620 case DC_TYPE_98713A: 1621 case DC_TYPE_987x5: 1622 case DC_TYPE_PNICII: 1623 dc_read_eeprom(sc, (caddr_t)&mac_offset, 1624 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 1625 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1626 (mac_offset / 2), 3, 0); 1627 break; 1628 case DC_TYPE_PNIC: 1629 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1); 1630 break; 1631 case DC_TYPE_DM9102: 1632 case DC_TYPE_21143: 1633 case DC_TYPE_21145: 1634 case DC_TYPE_ASIX: 1635 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1636 DC_EE_NODEADDR, 3, 0); 1637 break; 1638 case DC_TYPE_AL981: 1639 case DC_TYPE_AN983: 1640 reg = CSR_READ_4(sc, DC_AL_PAR0); 1641 sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff); 1642 sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff; 1643 sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff; 1644 sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff; 1645 reg = CSR_READ_4(sc, DC_AL_PAR1); 1646 sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff); 1647 sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff; 1648 break; 1649 case DC_TYPE_CONEXANT: 1650 bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR, 1651 &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 1652 break; 1653 case DC_TYPE_XIRCOM: 1654 break; 1655 default: 1656 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 1657 DC_EE_NODEADDR, 3, 0); 1658 break; 1659 } 1660 hasmac: 1661 1662 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data), 1663 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 1664 BUS_DMA_NOWAIT) != 0) { 1665 printf(": 
can't alloc list mem\n"); 1666 goto fail; 1667 } 1668 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 1669 sizeof(struct dc_list_data), &sc->sc_listkva, 1670 BUS_DMA_NOWAIT) != 0) { 1671 printf(": can't map list mem\n"); 1672 goto fail; 1673 } 1674 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1, 1675 sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT, 1676 &sc->sc_listmap) != 0) { 1677 printf(": can't alloc list map\n"); 1678 goto fail; 1679 } 1680 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 1681 sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 1682 printf(": can't load list map\n"); 1683 goto fail; 1684 } 1685 sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva; 1686 bzero(sc->dc_ldata, sizeof(struct dc_list_data)); 1687 1688 for (i = 0; i < DC_RX_LIST_CNT; i++) { 1689 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 1690 0, BUS_DMA_NOWAIT, 1691 &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) { 1692 printf(": can't create rx map\n"); 1693 return; 1694 } 1695 } 1696 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1697 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 1698 printf(": can't create rx spare map\n"); 1699 return; 1700 } 1701 1702 for (i = 0; i < DC_TX_LIST_CNT; i++) { 1703 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1704 DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT, 1705 &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) { 1706 printf(": can't create tx map\n"); 1707 return; 1708 } 1709 } 1710 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5, 1711 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 1712 printf(": can't create tx spare map\n"); 1713 return; 1714 } 1715 1716 /* 1717 * A 21143 or clone chip was detected. Inform the world. 
 */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Fill in the generic ifnet hooks and capabilities. */
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
	ifp->if_watchdog = dc_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		/* Remember the SROM-derived pmode so we can restore it. */
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	/*
	 * Setup General Purpose port mode and data so the tulip can talk
	 * to the MII. This needs to be done before mii_attach so that
	 * we can actually see them.
	 */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dc_miibus_readreg;
	sc->sc_mii.mii_writereg = dc_miibus_writereg;
	sc->sc_mii.mii_statchg = dc_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (DC_IS_INTEL(sc)) {
		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
			/* No PHY found: fall back to SIA/SYM + NWAY. */
			sc->dc_pmode = tmp;
			if (sc->dc_pmode != DC_PMODE_SIA)
				sc->dc_pmode = DC_PMODE_SYM;
			sc->dc_flags |= DC_21143_NWAY;
			if (sc->dc_flags & DC_MOMENCO_BOTCH)
				sc->dc_pmode = DC_PMODE_MII;
			/* Retry; dcphy attaches on this second pass. */
			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		} else {
			/* we have a PHY, so we must clear this bit */
			sc->dc_flags &= ~DC_TULIP_LEDS;
		}
	}

	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
	} else if (sc->dc_type == DC_TYPE_21145) {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);
	sc->sc_pwrhook = powerhook_establish(dc_power, sc);

fail:
	return;
}

/*
 * Initialize the transmit descriptors.
 */
int
dc_list_tx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i;
	bus_addr_t next;

	/* Chain each descriptor's dc_next to the following one, with
	 * the last pointing back at the first (closed ring). */
	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		next = sc->sc_listmap->dm_segs[0].ds_addr;
		if (i == (DC_TX_LIST_CNT - 1))
			next +=
			    offsetof(struct dc_list_data, dc_tx_list[0]);
		else
			next +=
			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
		cd->dc_tx_chain[i].sd_mbuf = NULL;
		ld->dc_tx_list[i].dc_data = htole32(0);
		ld->dc_tx_list[i].dc_ctl = htole32(0);
		ld->dc_tx_list[i].dc_next = htole32(next);
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
dc_list_rx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i;
	bus_addr_t next;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
		next = sc->sc_listmap->dm_segs[0].ds_addr;
		if (i == (DC_RX_LIST_CNT - 1))
			next +=
			    offsetof(struct dc_list_data, dc_rx_list[0]);
		else
			next +=
			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
		ld->dc_rx_list[i].dc_next = htole32(next);
	}

	cd->dc_rx_prod = 0;

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		/* Allocate a fresh cluster and DMA-load it via the spare
		 * map, then swap the spare with the slot's map so the
		 * slot always holds a loaded map. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Reserve 8 bytes at the front; dc_data below skips the same
	 * 8 bytes, keeping buffer and descriptor in agreement. */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Hand the buffer to the chip and sync the descriptor out. */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */

#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	/* dc_pnic_rx_bug_save marks where the bogus upload started. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
int
dc_rx_resync(struct dc_softc *sc)
{
	u_int32_t stat;
	int i, pos, offset;

	pos = sc->dc_cdata.dc_rx_prod;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {

		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
		if (!(stat & htole32(DC_RXSTAT_OWN)))
			break;
		DC_INC(pos, DC_RX_LIST_CNT);
	}

	/* If the ring really is empty, then just return. */
	if (i == DC_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->dc_cdata.dc_rx_prod = pos;

	return (EAGAIN);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, offset, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	for(;;) {
		struct mbuf *m0 = NULL;

		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		/* Chip still owns this descriptor: nothing more to do. */
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* Multi-descriptor upload: run the PNIC salvage code. */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Anything worse: reinit the chip. */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		m->m_pkthdr.rcvif = ifp;
		/* Copy out so the ring mbuf can be immediately re-armed;
		 * the ETHER_ALIGN shuffle keeps the IP header aligned. */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t txstat;

		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Non-final fragments and setup frames carry no frame
		 * status; just retire the descriptor. */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/* Underruns are recovered by dc_tx_underrun();
			 * anything else requires a full reinit. */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Re-enable the send queue once enough descriptors are free. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}

/*
 * Periodic timer: poll link state (or NWAY status on 21143-style
 * parts), kick the transmit queue once a link appears, and reschedule
 * ourselves.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/* Link-status bits in CSR12 are active-low;
			 * a set bit means that speed has no link. */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
			dc_start(ifp);
	}

	/* Poll faster while NWAY negotiation is still in progress. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add(&sc->dc_tick_tmo, hz / 10);
	else
		timeout_add(&sc->dc_tick_tmo, hz);

	splx(s);
}

/* A transmit underrun has occurred. Back off the transmit threshold,
 * or switch to store and forward mode if we have to.
 */
void
dc_tx_underrun(struct dc_softc *sc)
{
	u_int32_t isr;
	int i;

	if (DC_IS_DAVICOM(sc))
		dc_init(sc);

	if (DC_IS_INTEL(sc)) {
		/*
		 * The real 21143 requires that the transmitter be idle
		 * in order to change the transmit threshold or store
		 * and forward state.
		 */
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE)
				break;
			DELAY(10);
		}
		if (i == DC_TIMEOUT) {
			printf("%s: failed to force tx to idle state\n",
			    sc->sc_dev.dv_xname);
			dc_init(sc);
		}
	}

	/* Past the maximum threshold, give up and store-and-forward. */
	sc->dc_txthresh += DC_TXTHRESH_INC;
	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
	}

	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	return;
}

/*
 * Interrupt handler (continues past this chunk).
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return (claimed);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
dc_stop(sc); 2473 return (claimed); 2474 } 2475 2476 /* Disable interrupts. */ 2477 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2478 2479 while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 2480 status != 0xFFFFFFFF && 2481 (ifp->if_flags & IFF_RUNNING)) { 2482 2483 claimed = 1; 2484 CSR_WRITE_4(sc, DC_ISR, status); 2485 2486 if (status & DC_ISR_RX_OK) { 2487 int curpkts; 2488 curpkts = ifp->if_ipackets; 2489 dc_rxeof(sc); 2490 if (curpkts == ifp->if_ipackets) { 2491 while(dc_rx_resync(sc)) 2492 dc_rxeof(sc); 2493 } 2494 } 2495 2496 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2497 dc_txeof(sc); 2498 2499 if (status & DC_ISR_TX_IDLE) { 2500 dc_txeof(sc); 2501 if (sc->dc_cdata.dc_tx_cnt) { 2502 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2503 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2504 } 2505 } 2506 2507 if (status & DC_ISR_TX_UNDERRUN) 2508 dc_tx_underrun(sc); 2509 2510 if ((status & DC_ISR_RX_WATDOGTIMEO) 2511 || (status & DC_ISR_RX_NOBUF)) { 2512 int curpkts; 2513 curpkts = ifp->if_ipackets; 2514 dc_rxeof(sc); 2515 if (curpkts == ifp->if_ipackets) { 2516 while(dc_rx_resync(sc)) 2517 dc_rxeof(sc); 2518 } 2519 } 2520 2521 if (status & DC_ISR_BUS_ERR) { 2522 dc_reset(sc); 2523 dc_init(sc); 2524 } 2525 } 2526 2527 /* Re-enable interrupts. */ 2528 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2529 2530 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2531 dc_start(ifp); 2532 2533 return (claimed); 2534 } 2535 2536 /* 2537 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2538 * pointers to the fragment pointers. 2539 */ 2540 int 2541 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 2542 { 2543 struct dc_desc *f = NULL; 2544 int frag, cur, cnt = 0, i; 2545 bus_dmamap_t map; 2546 2547 /* 2548 * Start packing the mbufs in this chain into 2549 * the fragment pointers. Stop when we run out 2550 * of fragments or hit the end of the mbuf chain. 
2551 */ 2552 map = sc->sc_tx_sparemap; 2553 2554 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 2555 m_head, BUS_DMA_NOWAIT) != 0) 2556 return (ENOBUFS); 2557 2558 cur = frag = *txidx; 2559 2560 for (i = 0; i < map->dm_nsegs; i++) { 2561 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 2562 if (*txidx != sc->dc_cdata.dc_tx_prod && 2563 frag == (DC_TX_LIST_CNT - 1)) { 2564 bus_dmamap_unload(sc->sc_dmat, map); 2565 return (ENOBUFS); 2566 } 2567 } 2568 if ((DC_TX_LIST_CNT - 2569 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) { 2570 bus_dmamap_unload(sc->sc_dmat, map); 2571 return (ENOBUFS); 2572 } 2573 2574 f = &sc->dc_ldata->dc_tx_list[frag]; 2575 f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len); 2576 if (cnt == 0) { 2577 f->dc_status = htole32(0); 2578 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 2579 } else 2580 f->dc_status = htole32(DC_TXSTAT_OWN); 2581 f->dc_data = htole32(map->dm_segs[i].ds_addr); 2582 cur = frag; 2583 DC_INC(frag, DC_TX_LIST_CNT); 2584 cnt++; 2585 } 2586 2587 sc->dc_cdata.dc_tx_cnt += cnt; 2588 sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head; 2589 sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map; 2590 sc->dc_cdata.dc_tx_chain[cur].sd_map = map; 2591 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 2592 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 2593 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= 2594 htole32(DC_TXCTL_FINT); 2595 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 2596 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2597 htole32(DC_TXCTL_FINT); 2598 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 2599 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2600 htole32(DC_TXCTL_FINT); 2601 else if ((sc->dc_flags & DC_TX_USE_TX_INTR) && 2602 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd)) 2603 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2604 htole32(DC_TXCTL_FINT); 2605 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2606 BUS_DMASYNC_PREWRITE); 2607 2608 sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN); 2609 2610 
bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2611 offsetof(struct dc_list_data, dc_tx_list[*txidx]), 2612 sizeof(struct dc_desc) * cnt, 2613 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2614 2615 *txidx = frag; 2616 2617 return (0); 2618 } 2619 2620 /* 2621 * Coalesce an mbuf chain into a single mbuf cluster buffer. 2622 * Needed for some really badly behaved chips that just can't 2623 * do scatter/gather correctly. 2624 */ 2625 int 2626 dc_coal(struct dc_softc *sc, struct mbuf **m_head) 2627 { 2628 struct mbuf *m_new, *m; 2629 2630 m = *m_head; 2631 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 2632 if (m_new == NULL) 2633 return (ENOBUFS); 2634 if (m->m_pkthdr.len > MHLEN) { 2635 MCLGET(m_new, M_DONTWAIT); 2636 if (!(m_new->m_flags & M_EXT)) { 2637 m_freem(m_new); 2638 return (ENOBUFS); 2639 } 2640 } 2641 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 2642 m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; 2643 m_freem(m); 2644 *m_head = m_new; 2645 2646 return (0); 2647 } 2648 2649 /* 2650 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2651 * to the mbuf data regions directly in the transmit lists. We also save a 2652 * copy of the pointers since the transmit list fragment pointers are 2653 * physical addresses. 2654 */ 2655 2656 void 2657 dc_start(struct ifnet *ifp) 2658 { 2659 struct dc_softc *sc; 2660 struct mbuf *m_head = NULL; 2661 int idx; 2662 2663 sc = ifp->if_softc; 2664 2665 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) 2666 return; 2667 2668 if (ifp->if_flags & IFF_OACTIVE) 2669 return; 2670 2671 idx = sc->dc_cdata.dc_tx_prod; 2672 2673 while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) { 2674 IFQ_POLL(&ifp->if_snd, m_head); 2675 if (m_head == NULL) 2676 break; 2677 2678 if (sc->dc_flags & DC_TX_COALESCE && 2679 (m_head->m_next != NULL || 2680 sc->dc_flags & DC_TX_ALIGN)) { 2681 /* note: dc_coal breaks the poll-and-dequeue rule. 2682 * if dc_coal fails, we lose the packet. 
2683 */ 2684 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2685 if (dc_coal(sc, &m_head)) { 2686 ifp->if_flags |= IFF_OACTIVE; 2687 break; 2688 } 2689 } 2690 2691 if (dc_encap(sc, m_head, &idx)) { 2692 ifp->if_flags |= IFF_OACTIVE; 2693 break; 2694 } 2695 2696 /* now we are committed to transmit the packet */ 2697 if (sc->dc_flags & DC_TX_COALESCE) { 2698 /* if mbuf is coalesced, it is already dequeued */ 2699 } else 2700 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2701 2702 /* 2703 * If there's a BPF listener, bounce a copy of this frame 2704 * to him. 2705 */ 2706 #if NBPFILTER > 0 2707 if (ifp->if_bpf) 2708 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 2709 #endif 2710 if (sc->dc_flags & DC_TX_ONE) { 2711 ifp->if_flags |= IFF_OACTIVE; 2712 break; 2713 } 2714 } 2715 if (idx == sc->dc_cdata.dc_tx_prod) 2716 return; 2717 2718 /* Transmit */ 2719 sc->dc_cdata.dc_tx_prod = idx; 2720 if (!(sc->dc_flags & DC_TX_POLL)) 2721 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2722 2723 /* 2724 * Set a timeout in case the chip goes out to lunch. 2725 */ 2726 ifp->if_timer = 5; 2727 } 2728 2729 void 2730 dc_init(void *xsc) 2731 { 2732 struct dc_softc *sc = xsc; 2733 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 2734 struct mii_data *mii; 2735 int s; 2736 2737 s = splnet(); 2738 2739 mii = &sc->sc_mii; 2740 2741 /* 2742 * Cancel pending I/O and free all RX/TX buffers. 2743 */ 2744 dc_stop(sc); 2745 dc_reset(sc); 2746 2747 /* 2748 * Set cache alignment and burst length. 2749 */ 2750 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 2751 CSR_WRITE_4(sc, DC_BUSCTL, 0); 2752 else 2753 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 2754 /* 2755 * Evenly share the bus between receive and transmit process. 
2756 */ 2757 if (DC_IS_INTEL(sc)) 2758 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 2759 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 2760 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 2761 } else { 2762 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 2763 } 2764 if (sc->dc_flags & DC_TX_POLL) 2765 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 2766 switch(sc->dc_cachesize) { 2767 case 32: 2768 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 2769 break; 2770 case 16: 2771 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 2772 break; 2773 case 8: 2774 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 2775 break; 2776 case 0: 2777 default: 2778 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 2779 break; 2780 } 2781 2782 if (sc->dc_flags & DC_TX_STORENFWD) 2783 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2784 else { 2785 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2786 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2787 } else { 2788 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2789 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2790 } 2791 } 2792 2793 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 2794 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 2795 2796 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2797 /* 2798 * The app notes for the 98713 and 98715A say that 2799 * in order to have the chips operate properly, a magic 2800 * number must be written to CSR16. Macronix does not 2801 * document the meaning of these bits so there's no way 2802 * to know exactly what they do. The 98713 has a magic 2803 * number all its own; the rest all use a different one. 
2804 */ 2805 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 2806 if (sc->dc_type == DC_TYPE_98713) 2807 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 2808 else 2809 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 2810 } 2811 2812 if (DC_IS_XIRCOM(sc)) { 2813 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2814 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2815 DELAY(10); 2816 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2817 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2818 DELAY(10); 2819 } 2820 2821 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2822 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 2823 2824 /* Init circular RX list. */ 2825 if (dc_list_rx_init(sc) == ENOBUFS) { 2826 printf("%s: initialization failed: no " 2827 "memory for rx buffers\n", sc->sc_dev.dv_xname); 2828 dc_stop(sc); 2829 splx(s); 2830 return; 2831 } 2832 2833 /* 2834 * Init tx descriptors. 2835 */ 2836 dc_list_tx_init(sc); 2837 2838 /* 2839 * Sync down both lists initialized. 2840 */ 2841 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2842 0, sc->sc_listmap->dm_mapsize, 2843 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2844 2845 /* 2846 * Load the address of the RX list. 2847 */ 2848 CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2849 offsetof(struct dc_list_data, dc_rx_list[0])); 2850 CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2851 offsetof(struct dc_list_data, dc_tx_list[0])); 2852 2853 /* 2854 * Enable interrupts. 2855 */ 2856 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2857 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 2858 2859 /* Enable transmitter. */ 2860 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2861 2862 /* 2863 * If this is an Intel 21143 and we're not using the 2864 * MII port, program the LED control pins so we get 2865 * link and activity indications. 
2866 */ 2867 if (sc->dc_flags & DC_TULIP_LEDS) { 2868 CSR_WRITE_4(sc, DC_WATCHDOG, 2869 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 2870 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 2871 } 2872 2873 /* 2874 * Load the RX/multicast filter. We do this sort of late 2875 * because the filter programming scheme on the 21143 and 2876 * some clones requires DMAing a setup frame via the TX 2877 * engine, and we need the transmitter enabled for that. 2878 */ 2879 dc_setfilt(sc); 2880 2881 /* Enable receiver. */ 2882 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 2883 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 2884 2885 mii_mediachg(mii); 2886 dc_setcfg(sc, sc->dc_if_media); 2887 2888 ifp->if_flags |= IFF_RUNNING; 2889 ifp->if_flags &= ~IFF_OACTIVE; 2890 2891 splx(s); 2892 2893 timeout_set(&sc->dc_tick_tmo, dc_tick, sc); 2894 2895 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 2896 sc->dc_link = 1; 2897 else { 2898 if (sc->dc_flags & DC_21143_NWAY) 2899 timeout_add(&sc->dc_tick_tmo, hz / 10); 2900 else 2901 timeout_add(&sc->dc_tick_tmo, hz); 2902 } 2903 2904 #ifdef SRM_MEDIA 2905 if(sc->dc_srm_media) { 2906 struct ifreq ifr; 2907 2908 ifr.ifr_media = sc->dc_srm_media; 2909 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 2910 sc->dc_srm_media = 0; 2911 } 2912 #endif 2913 } 2914 2915 /* 2916 * Set media options. 2917 */ 2918 int 2919 dc_ifmedia_upd(struct ifnet *ifp) 2920 { 2921 struct dc_softc *sc; 2922 struct mii_data *mii; 2923 struct ifmedia *ifm; 2924 2925 sc = ifp->if_softc; 2926 mii = &sc->sc_mii; 2927 mii_mediachg(mii); 2928 2929 ifm = &mii->mii_media; 2930 2931 if (DC_IS_DAVICOM(sc) && 2932 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 2933 dc_setcfg(sc, ifm->ifm_media); 2934 else 2935 sc->dc_link = 0; 2936 2937 return (0); 2938 } 2939 2940 /* 2941 * Report current media status. 
 */
void
dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	/*
	 * HomePNA on Davicom bypasses the MII: report the selected
	 * media as-is with no status bits.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
			ifmr->ifm_active = ifm->ifm_media;
			ifmr->ifm_status = 0;
			return;
		}
	}
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls: address assignment, interface flag and MTU
 * changes, multicast list updates and media selection.  Runs at
 * splnet(); returns 0 or an errno.
 */
int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	/* Let the common Ethernet code have first crack at the request. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			dc_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only the promiscuous bit changed while running:
			 * reprogram the RX filter instead of reinitializing.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->dc_if_flags) &
			    IFF_PROMISC) {
				dc_setfilt(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING)) {
					/*
					 * Restart with the lowest TX
					 * threshold; dc_tx_underrun() will
					 * raise it again as needed.
					 */
					sc->dc_txthresh = 0;
					dc_init(sc);
				}
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				dc_setfilt(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}

/*
 * TX watchdog: the chip failed to complete transmission within the
 * timeout armed in dc_start().  Count the error, reset and
 * reinitialize the chip, then restart any pending output.
 */
void
dc_watchdog(struct ifnet *ifp)
{
	struct dc_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	dc_stop(sc);
	dc_reset(sc);
	dc_init(sc);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		dc_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Halt the receiver/transmitter and mask all interrupts. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_rx_list,
	    sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/*
			 * Slots marked SETUP carry the RX filter setup
			 * frame, not a normal packet mbuf — don't m_freem.
			 */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_tx_list,
	    sizeof(sc->dc_ldata->dc_tx_list));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
3143 */ 3144 void 3145 dc_shutdown(void *v) 3146 { 3147 struct dc_softc *sc = (struct dc_softc *)v; 3148 3149 dc_stop(sc); 3150 } 3151 3152 void 3153 dc_power(int why, void *arg) 3154 { 3155 struct dc_softc *sc = arg; 3156 struct ifnet *ifp; 3157 int s; 3158 3159 s = splnet(); 3160 if (why != PWR_RESUME) 3161 dc_stop(sc); 3162 else { 3163 ifp = &sc->sc_arpcom.ac_if; 3164 if (ifp->if_flags & IFF_UP) 3165 dc_init(sc); 3166 } 3167 splx(s); 3168 } 3169 3170 struct cfdriver dc_cd = { 3171 0, "dc", DV_IFNET 3172 }; 3173