/*	$OpenBSD: dc.c,v 1.44 2001/12/13 17:43:02 nate Exp $	*/

/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
 */

/*
 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN983 (www.admtek.com.tw)
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217, EN2242 (www.accton.com)
 * Xircom X3201 (www.xircom.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Intel 21143 is the successor to the DEC 21140. It is basically
 * the same as the 21140 but with a few new features. The 21143 supports
 * three kinds of media attachments:
 *
 * o MII port, for 10Mbps and 100Mbps support and NWAY
 *   autonegotiation provided by an external PHY.
 * o SYM port, for symbol mode 100Mbps support.
 * o 10baseT port.
 * o AUI/BNC port.
 *
 * The 100Mbps SYM port and 10baseT port can be used together in
 * combination with the internal NWAY support to create a 10/100
 * autosensing configuration.
 *
 * Note that not all tulip workalikes are handled in this driver: we only
 * deal with those which are relatively well behaved. The Winbond is
 * handled separately due to its different register offsets and the
 * special handling needed for its various bugs. The PNIC is handled
 * here, but I'm not thrilled about it.
 *
 * All of the workalike chips use some form of MII transceiver support
 * with the exception of the Macronix chips, which also have a SYM port.
 * The ASIX AX88140A is also documented to have a SYM port, but all
 * the cards I've seen use an MII transceiver, probably because the
 * AX88140A doesn't support internal NWAY.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>
#include <dev/pci/pcidevs.h>

#define DC_USEIOSPACE
#include <dev/ic/dcreg.h>

int dc_intr __P((void *));
void dc_shutdown __P((void *));
struct dc_type *dc_devtype __P((void *));
int dc_newbuf __P((struct dc_softc *, int, struct mbuf *));
int dc_encap __P((struct dc_softc *, struct mbuf *, u_int32_t *));
int dc_coal __P((struct dc_softc *, struct mbuf **));

void dc_pnic_rx_bug_war __P((struct dc_softc *, int));
int dc_rx_resync __P((struct dc_softc *));
void dc_rxeof __P((struct dc_softc *));
void dc_txeof __P((struct dc_softc *));
void dc_tick __P((void *));
void dc_start __P((struct ifnet *));
int dc_ioctl __P((struct ifnet *, u_long, caddr_t));
void dc_init __P((void *));
void dc_stop __P((struct dc_softc *));
void dc_watchdog __P((struct ifnet *));
int dc_ifmedia_upd __P((struct ifnet *));
void dc_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

void dc_delay __P((struct dc_softc *));
void dc_eeprom_width __P((struct dc_softc *));
void dc_eeprom_idle __P((struct dc_softc *));
void dc_eeprom_putbyte __P((struct dc_softc *, int));
void dc_eeprom_getword __P((struct dc_softc *, int, u_int16_t *));
void dc_eeprom_getword_pnic __P((struct dc_softc *, int, u_int16_t *));
void dc_read_eeprom __P((struct dc_softc *, caddr_t, int, int, int));

void dc_mii_writebit __P((struct dc_softc *, int));
int dc_mii_readbit __P((struct dc_softc *));
void dc_mii_sync __P((struct dc_softc *));
void dc_mii_send __P((struct dc_softc *, u_int32_t, int));
int dc_mii_readreg __P((struct dc_softc *, struct dc_mii_frame *));
int dc_mii_writereg __P((struct dc_softc *, struct dc_mii_frame *));
int dc_miibus_readreg __P((struct device *, int, int));
void dc_miibus_writereg __P((struct device *, int, int, int));
void dc_miibus_statchg __P((struct device *));

void dc_setcfg __P((struct dc_softc *, int));
u_int32_t dc_crc_le __P((struct dc_softc *, caddr_t));
u_int32_t dc_crc_be __P((caddr_t));
void dc_setfilt_21143 __P((struct dc_softc *));
void dc_setfilt_asix __P((struct dc_softc *));
void dc_setfilt_admtek __P((struct dc_softc *));
void dc_setfilt_xircom __P((struct dc_softc *));

void dc_setfilt __P((struct dc_softc *));

void dc_reset __P((struct dc_softc *));
int dc_list_rx_init __P((struct dc_softc *));
int dc_list_tx_init __P((struct dc_softc *));

void dc_read_srom __P((struct dc_softc *, int));
void dc_parse_21143_srom __P((struct dc_softc *));
void dc_decode_leaf_sia __P((struct dc_softc *,
    struct dc_eblock_sia *));
void dc_decode_leaf_mii __P((struct dc_softc *,
    struct dc_eblock_mii *));
void dc_decode_leaf_sym __P((struct dc_softc *,
    struct dc_eblock_sym *));
void dc_apply_fixup __P((struct dc_softc *, int));

#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))

/*
 * Issue enough dummy reads of the bus control register to delay for
 * roughly 300ns on a 33MHz PCI bus; used to pace the bit-banged EEPROM
 * and MII access routines below.
 */
void dc_delay(sc)
	struct dc_softc *sc;
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, DC_BUSCTL);
}

/*
 * Determine the address width of the serial EEPROM: clock out a read
 * command, then count how many address bits it takes before the part
 * pulls its data-out line low.  Fall back to 6 bits if the probe
 * yields nonsense.
 */
void dc_eeprom_width(sc)
	struct dc_softc *sc;
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}

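/*
 * Force the EEPROM into a known idle state: select it, clock it 25
 * times and then deselect it, leaving the serial interface quiescent.
 */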
void dc_eeprom_idle(sc)
	struct dc_softc *sc;
{
	register int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void dc_eeprom_putbyte(sc, addr)
	struct dc_softc *sc;
	int addr;
{
	register int d, i;

	d = DC_EECMD_READ >> 6;

	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
void dc_eeprom_getword_pnic(sc, addr, dest)
	struct dc_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void dc_eeprom_getword(sc, addr, dest)
	struct dc_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
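 * 'off' and 'cnt' are counted in 16-bit words; when 'swap' is set,
 * each word is passed through ntohs() before being stored at 'dest'.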
 */
void dc_read_eeprom(sc, dest, off, cnt, swap)
	struct dc_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */
/*
 * Write a bit to the MII bus.
 */
void dc_mii_writebit(sc, bit)
	struct dc_softc *sc;
	int bit;
{
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);

	return;
}

/*
 * Read a bit from the MII bus.
 */
int dc_mii_readbit(sc)
	struct dc_softc *sc;
{
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return(1);

	return(0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void dc_mii_sync(sc)
	struct dc_softc *sc;
{
	register int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void dc_mii_send(sc, bits, cnt)
	struct dc_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read a PHY register through the MII.
 */
int dc_mii_readreg(sc, frame)
	struct dc_softc *sc;
	struct dc_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int dc_mii_writereg(sc, frame)
	struct dc_softc *sc;
	struct dc_mii_frame *frame;

{
	int s;

	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);

	return(0);
}

int dc_miibus_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	bzero((char *)&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return(0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return(BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return(PCI_VENDOR_LITEON);
				return(PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return(PCI_PRODUCT_LITEON_PNIC);
				return(PCI_PRODUCT_DEC_21142);
				break;
			default:
				return(0);
				break;
			}
		} else
			return(0);
	}

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return(rval == 0xFFFF ? 0 : rval);
			}
		}
		return(0);
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_read: bad phy register %x\n",
			    sc->dc_unit, reg);
			return(0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return(0);
		return(rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return(frame.mii_data);
}

void dc_miibus_writereg(self, phy, reg, data)
	struct device *self;
	int phy, reg, data;
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero((char *)&frame, sizeof(frame));

	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_write: bad phy register %x\n",
			    sc->dc_unit, reg);
			return;
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return;
}

/*
 * Media status change callback from the MII layer: reprogram the
 * chip's port/speed/duplex configuration for the new active media.
 */
void dc_miibus_statchg(self)
	struct device *self;
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct mii_data *mii;
	struct ifmedia *ifm;

	if (DC_IS_ADMTEK(sc))
		return;

	mii = &sc->sc_mii;
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}

	return;
}

#define DC_POLY		0xEDB88320
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

u_int32_t dc_crc_le(sc, addr)
	struct dc_softc *sc;
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/*
	 * Compute CRC for the address value.
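	 * This is the standard little-endian (reflected) CRC-32 of the six
	 * address bytes; the chips use its low-order bits as the index into
	 * their multicast hash filter table.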
	 */
	crc = 0xFFFFFFFF;	/* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
	}

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
	}

	return (crc & ((1 << DC_BITS_512) - 1));
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
u_int32_t dc_crc_be(addr)
	caddr_t addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
void dc_setfilt_21143(sc)
	struct dc_softc *sc;
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero((char *)sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_le(sc, enm->enm_addrlo);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
		ETHER_NEXT_MULTI(step, enm);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[39] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 0);
	sp[40] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 1);
	sp[41] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 2);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    sizeof(struct dc_list_data) -
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sframe->dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[i]),
	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;

	return;
}

void dc_setfilt_admtek(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);

	return;
}

void dc_setfilt_asix(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

	return;
}

void dc_setfilt_xircom(sc)
	struct dc_softc *sc;
{
	struct dc_desc *sframe;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h, *sp;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero((char *)sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_le(sc, enm->enm_addrlo);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
		ETHER_NEXT_MULTI(step, enm);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[0] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->arpcom.ac_enaddr, 2);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;

	return;
}

void dc_setfilt(sc)
	struct dc_softc *sc;
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);

	return;
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
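 * dc_setcfg() therefore stops the TX/RX DMA engines and waits for them
 * to report idle, reprograms the port, speed, duplex and scrambler bits
 * for the requested media, and then restarts TX/RX if they were running.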
 */
void dc_setcfg(sc, media)
	struct dc_softc *sc;
	int media;
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(10);
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);

	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

/*
 * Issue a software reset and wait for the chip to reinitialize itself,
 * then clear the interrupt mask and mode registers.
 */
void dc_reset(sc)
	struct dc_softc *sc;
{
	register int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset.  In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

/*
 * Replay the reset and general-purpose I/O sequences from the SROM
 * media block that matches 'media' by writing them to the watchdog/GPIO
 * register.
 */
void dc_apply_fixup(sc, media)
	struct dc_softc *sc;
	int media;
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

void dc_decode_leaf_sia(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_sia *l;
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sia_code == DC_SIA_CODE_10BT)
		m->dc_media = IFM_10_T;

	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
		m->dc_media = IFM_10_T|IFM_FDX;

	if (l->dc_sia_code == DC_SIA_CODE_10B2)
		m->dc_media = IFM_10_2;

	if (l->dc_sia_code == DC_SIA_CODE_10B5)
		m->dc_media = IFM_10_5;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

void dc_decode_leaf_sym(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_sym *l;
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

void dc_decode_leaf_mii(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_mii *l;
{
	u_int8_t *p;
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	/*
	 * We abuse IFM_AUTO to represent MII.
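	 * The MII extension block carries a general-purpose I/O sequence
	 * followed by a reset sequence; record pointers to both so that
	 * dc_apply_fixup() can replay them later.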
	 */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return;
}

void dc_read_srom(sc, bits)
	struct dc_softc *sc;
	int bits;
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

/*
 * Walk the 21143 SROM info leaf and decode each media block it contains.
 */
void dc_parse_21143_srom(sc)
	struct dc_softc *sc;
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int i, loff;
	char *ptr;

	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void dc_attach(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	int error = 0, mac_offset, tmp, i;

	/*
	 * Get station address from the EEPROM.
	 */
	if (sc->sc_hasmac)
		goto hasmac;

	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
		    (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
		    DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN983:
		bcopy(&sc->dc_srom[DC_AL_EE_NODEADDR], &sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN);
		break;
	case DC_TYPE_XIRCOM:
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
		    DC_EE_NODEADDR, 3, 0);
		break;
	}
hasmac:

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct dc_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		goto fail;
	}
	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
	bzero(sc->dc_ldata, sizeof(struct dc_list_data));

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	/*
	 * A 21143 or clone chip was detected. Inform the world.
	 */
	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = dc_start;
	ifp->if_watchdog = dc_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

#if NVLAN > 0
	ifp->if_capabilities = IFCAP_VLAN_MTU;
#endif

	/* Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
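	 * The dcphy driver drives the 21143's internal NWAY autonegotiation
	 * logic when no external MII PHY is present.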
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dc_miibus_readreg;
	sc->sc_mii.mii_writereg = dc_miibus_writereg;
	sc->sc_mii.mii_statchg = dc_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		error = ENXIO;
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);

	if (DC_IS_INTEL(sc)) {
		if (error) {
			sc->dc_pmode = tmp;
			if (sc->dc_pmode != DC_PMODE_SIA)
				sc->dc_pmode = DC_PMODE_SYM;
			sc->dc_flags |= DC_21143_NWAY;
			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
			error = 0;
		} else {
			/* we have a PHY, so we must clear this bit */
			sc->dc_flags &= ~DC_TULIP_LEDS;
		}
	}

	if (error) {
		printf("dc%d: MII without any PHY!\n", sc->dc_unit);
		error = ENXIO;
		goto fail;
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);

fail:
	return;
}

/*
 * Detach: undo everything dc_attach() and the MII layer set up.
 */
int dc_detach(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	if (sc->dc_srom)
		free(sc->dc_srom, M_DEVBUF);

	timeout_del(&sc->dc_tick_tmo);

	ether_ifdetach(ifp);
	if_detach(ifp);

	shutdownhook_disestablish(sc->sc_dhook);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
int dc_list_tx_init(sc)
	struct dc_softc *sc;
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i;
	bus_addr_t next;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		next = sc->sc_listmap->dm_segs[0].ds_addr;
		if (i == (DC_TX_LIST_CNT - 1))
			next +=
			    offsetof(struct dc_list_data, dc_tx_list[0]);
		else
			next +=
			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
		cd->dc_tx_chain[i].sd_mbuf = NULL;
		ld->dc_tx_list[i].dc_data = htole32(0);
		ld->dc_tx_list[i].dc_ctl = htole32(0);
		ld->dc_tx_list[i].dc_next = htole32(next);
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int dc_list_rx_init(sc)
	struct dc_softc *sc;
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i;
	bus_addr_t next;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
		next = sc->sc_listmap->dm_segs[0].ds_addr;
		if (i == (DC_RX_LIST_CNT - 1))
			next +=
			    offsetof(struct dc_list_data, dc_rx_list[0]);
		else
			next +=
			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
		ld->dc_rx_list[i].dc_next = htole32(next);
	}

	cd->dc_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int dc_newbuf(sc, i, m)
	struct dc_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("dc%d: no memory for rx list "
			    "-- packet dropped!\n", sc->dc_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("dc%d: no memory for rx list "
			    "-- packet dropped!\n", sc->dc_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			printf("%s: rx load failed\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return (ENOBUFS);
		}
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */

#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
void dc_pnic_rx_bug_war(sc, idx)
	struct dc_softc *sc;
	int idx;
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);

	return;
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer.  This may happen sometimes as a result
	 * This may happen sometimes as a result
2070	 * of a "no RX buffer available" condition that happens when the chip
2071	 * consumes all of the RX buffers before the driver has a chance to
2072	 * process the RX ring. This routine may need to be called more than
2073	 * once to bring the driver back in sync with the chip, however we
2074	 * should still be getting RX DONE interrupts to drive the search
2075	 * for new packets in the RX ring, so we should catch up eventually.
2076	 */
2077	int dc_rx_resync(sc)
2078		struct dc_softc *sc;
2079	{
2080		int i, pos;
2081		struct dc_desc *cur_rx;
2082
2083		pos = sc->dc_cdata.dc_rx_prod;
2084
2085		for (i = 0; i < DC_RX_LIST_CNT; i++) {
2086			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2087			    offsetof(struct dc_list_data, dc_rx_list[pos]),
2088			    sizeof(struct dc_desc),
2089			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2090
2091			cur_rx = &sc->dc_ldata->dc_rx_list[pos];
2092			if (!(cur_rx->dc_status & htole32(DC_RXSTAT_OWN)))
2093				break;
2094			DC_INC(pos, DC_RX_LIST_CNT);
2095		}
2096
2097		/* If the ring really is empty, then just return. */
2098		if (i == DC_RX_LIST_CNT)
2099			return(0);
2100
2101		/* We've fallen behind the chip: catch it. */
2102		sc->dc_cdata.dc_rx_prod = pos;
2103
2104		return(EAGAIN);
2105	}
2106
2107	/*
2108	 * A frame has been uploaded: pass the resulting mbuf chain up to
2109	 * the higher level protocols.
2110	 */
2111	void dc_rxeof(sc)
2112		struct dc_softc *sc;
2113	{
2114		struct mbuf *m;
2115		struct ifnet *ifp;
2116		struct dc_desc *cur_rx;
2117		int i, total_len = 0;
2118		u_int32_t rxstat;
2119
2120		ifp = &sc->arpcom.ac_if;
2121		i = sc->dc_cdata.dc_rx_prod;
2122
2123		while(!(sc->dc_ldata->dc_rx_list[i].dc_status &
2124		    htole32(DC_RXSTAT_OWN))) {
2125			struct mbuf *m0 = NULL;
2126
2127			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2128			    offsetof(struct dc_list_data, dc_rx_list[i]),
2129			    sizeof(struct dc_desc),
2130			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2131
2132			cur_rx = &sc->dc_ldata->dc_rx_list[i];
2133			rxstat = letoh32(cur_rx->dc_status);
2134			m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2135			total_len = DC_RXBYTES(rxstat);
2136
2137			if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2138				if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
2139					if (rxstat & DC_RXSTAT_FIRSTFRAG)
2140						sc->dc_pnic_rx_bug_save = i;
2141					if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
2142						DC_INC(i, DC_RX_LIST_CNT);
2143						continue;
2144					}
2145					dc_pnic_rx_bug_war(sc, i);
2146					rxstat = letoh32(cur_rx->dc_status);
2147					total_len = DC_RXBYTES(rxstat);
2148				}
2149			}
2150
2151			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
2152
2153			/*
2154			 * If an error occurs, update stats, clear the
2155			 * status word and leave the mbuf cluster in place:
2156			 * it should simply get re-used next time this descriptor
2157			 * comes up in the ring.
2158			 */
2159			if (rxstat & DC_RXSTAT_RXERR
2160	#if NVLAN > 0
2161			/*
2162			 * If VLANs are enabled, allow frames up to 4 bytes
2163			 * longer than the MTU. This should really check if
2164			 * the giant packet has a vlan tag.
2165			 */
2166			    && ((rxstat & (DC_RXSTAT_GIANT|DC_RXSTAT_LASTFRAG)) == 0
2167			    && total_len <= ifp->if_mtu + 4)
2168	#endif
2169			    ) {
2170				ifp->if_ierrors++;
2171				if (rxstat & DC_RXSTAT_COLLSEEN)
2172					ifp->if_collisions++;
2173				dc_newbuf(sc, i, m);
2174				if (rxstat & DC_RXSTAT_CRCERR) {
2175					DC_INC(i, DC_RX_LIST_CNT);
2176					continue;
2177				} else {
2178					dc_init(sc);
2179					return;
2180				}
2181			}
2182
2183			/* No errors; receive the packet.
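		 * Strip the trailing CRC, then copy the frame out with
		 * m_devget(), starting ETHER_ALIGN bytes before the payload
		 * so that the m_adj() below leaves the IP header longword
		 * aligned in the new chain; the original cluster goes right
		 * back into the ring via dc_newbuf().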
*/ 2184 total_len -= ETHER_CRC_LEN; 2185 2186 bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 2187 0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize, 2188 BUS_DMASYNC_POSTREAD); 2189 2190 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 2191 total_len + ETHER_ALIGN, 0, ifp, NULL); 2192 dc_newbuf(sc, i, m); 2193 DC_INC(i, DC_RX_LIST_CNT); 2194 if (m0 == NULL) { 2195 ifp->if_ierrors++; 2196 continue; 2197 } 2198 m_adj(m0, ETHER_ALIGN); 2199 m = m0; 2200 2201 ifp->if_ipackets++; 2202 2203 #if NBPFILTER > 0 2204 if (ifp->if_bpf) 2205 bpf_mtap(ifp->if_bpf, m); 2206 #endif 2207 ether_input_mbuf(ifp, m); 2208 } 2209 2210 sc->dc_cdata.dc_rx_prod = i; 2211 2212 return; 2213 } 2214 2215 /* 2216 * A frame was downloaded to the chip. It's safe for us to clean up 2217 * the list buffers. 2218 */ 2219 2220 void dc_txeof(sc) 2221 struct dc_softc *sc; 2222 { 2223 struct dc_desc *cur_tx = NULL; 2224 struct ifnet *ifp; 2225 int idx; 2226 2227 ifp = &sc->arpcom.ac_if; 2228 2229 /* Clear the timeout timer. */ 2230 ifp->if_timer = 0; 2231 2232 /* 2233 * Go through our tx list and free mbufs for those 2234 * frames that have been transmitted. 2235 */ 2236 idx = sc->dc_cdata.dc_tx_cons; 2237 while(idx != sc->dc_cdata.dc_tx_prod) { 2238 u_int32_t txstat; 2239 2240 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2241 offsetof(struct dc_list_data, dc_tx_list[idx]), 2242 sizeof(struct dc_desc), 2243 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2244 2245 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2246 txstat = letoh32(cur_tx->dc_status); 2247 2248 if (txstat & DC_TXSTAT_OWN) 2249 break; 2250 2251 if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) || 2252 cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) { 2253 sc->dc_cdata.dc_tx_cnt--; 2254 if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) { 2255 /* 2256 * Yes, the PNIC is so brain damaged 2257 * that it will sometimes generate a TX 2258 * underrun error while DMAing the RX 2259 * filter setup frame. If we detect this, 2260 * we have to send the setup frame again, 2261 * or else the filter won't be programmed 2262 * correctly. 2263 */ 2264 if (DC_IS_PNIC(sc)) { 2265 if (txstat & DC_TXSTAT_ERRSUM) 2266 dc_setfilt(sc); 2267 } 2268 sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL; 2269 } 2270 DC_INC(idx, DC_TX_LIST_CNT); 2271 continue; 2272 } 2273 2274 if (DC_IS_XIRCOM(sc)) { 2275 /* 2276 * XXX: Why does my Xircom taunt me so? 2277 * For some reason it likes setting the CARRLOST flag 2278 * even when the carrier is there. wtf?! 
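			 * The MII-mode checks below deal with this by clearing
			 * DC_TXSTAT_ERRSUM before the error counters are
			 * updated, so a spurious carrier indication is not
			 * charged as an output error.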
*/ 2279 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2280 sc->dc_pmode == DC_PMODE_MII && 2281 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2282 DC_TXSTAT_NOCARRIER))) 2283 txstat &= ~DC_TXSTAT_ERRSUM; 2284 } else { 2285 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2286 sc->dc_pmode == DC_PMODE_MII && 2287 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2288 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2289 txstat &= ~DC_TXSTAT_ERRSUM; 2290 } 2291 2292 if (txstat & DC_TXSTAT_ERRSUM) { 2293 ifp->if_oerrors++; 2294 if (txstat & DC_TXSTAT_EXCESSCOLL) 2295 ifp->if_collisions++; 2296 if (txstat & DC_TXSTAT_LATECOLL) 2297 ifp->if_collisions++; 2298 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2299 dc_init(sc); 2300 return; 2301 } 2302 } 2303 2304 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2305 2306 ifp->if_opackets++; 2307 if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) { 2308 bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map; 2309 2310 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2311 BUS_DMASYNC_POSTWRITE); 2312 bus_dmamap_unload(sc->sc_dmat, map); 2313 } 2314 if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) { 2315 m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf); 2316 sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL; 2317 } 2318 sc->dc_cdata.dc_tx_cnt--; 2319 DC_INC(idx, DC_TX_LIST_CNT); 2320 } 2321 2322 sc->dc_cdata.dc_tx_cons = idx; 2323 if (cur_tx != NULL) 2324 ifp->if_flags &= ~IFF_OACTIVE; 2325 2326 return; 2327 } 2328 2329 void dc_tick(xsc) 2330 void *xsc; 2331 { 2332 struct dc_softc *sc = (struct dc_softc *)xsc; 2333 struct mii_data *mii; 2334 struct ifnet *ifp; 2335 int s; 2336 u_int32_t r; 2337 2338 s = splimp(); 2339 2340 ifp = &sc->arpcom.ac_if; 2341 mii = &sc->sc_mii; 2342 2343 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2344 if (sc->dc_flags & DC_21143_NWAY) { 2345 r = CSR_READ_4(sc, DC_10BTSTAT); 2346 if (IFM_SUBTYPE(mii->mii_media_active) == 2347 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2348 sc->dc_link = 0; 2349 mii_mediachg(mii); 2350 } 2351 if (IFM_SUBTYPE(mii->mii_media_active) == 2352 IFM_10_T && (r & DC_TSTAT_LS10)) { 2353 sc->dc_link = 0; 2354 mii_mediachg(mii); 2355 } 2356 if (sc->dc_link == 0) 2357 mii_tick(mii); 2358 } else { 2359 r = CSR_READ_4(sc, DC_ISR); 2360 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2361 sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) 2362 mii_tick(mii); 2363 if (!(mii->mii_media_status & IFM_ACTIVE)) 2364 sc->dc_link = 0; 2365 } 2366 } else 2367 mii_tick(mii); 2368 2369 /* 2370 * When the init routine completes, we expect to be able to send 2371 * packets right away, and in fact the network code will send a 2372 * gratuitous ARP the moment the init routine marks the interface 2373 * as running. However, even though the MAC may have been initialized, 2374 * there may be a delay of a few seconds before the PHY completes 2375 * autonegotiation and the link is brought up. Any transmissions 2376 * made during that delay will be lost. Dealing with this is tricky: 2377 * we can't just pause in the init routine while waiting for the 2378 * PHY to come ready since that would bring the whole system to 2379 * a screeching halt for several seconds. 2380 * 2381 * What we do here is prevent the TX start routine from sending 2382 * any packets until a link has been established. After the 2383 * interface has been initialized, the tick routine will poll 2384 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2385 * that time, packets will stay in the send queue, and once the 2386 * link comes up, they will be flushed out to the wire. 
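	 *
	 * While the link is still down on 21143-style NWAY chips, the tick
	 * timeout below is re-armed at hz/10 so that a new link is noticed
	 * quickly; once the link is up (or on other chips) it drops back to
	 * once per second.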
2387 */ 2388 if (!sc->dc_link) { 2389 mii_pollstat(mii); 2390 if (mii->mii_media_status & IFM_ACTIVE && 2391 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2392 sc->dc_link++; 2393 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2394 dc_start(ifp); 2395 } 2396 } 2397 2398 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2399 timeout_add(&sc->dc_tick_tmo, hz / 10); 2400 else 2401 timeout_add(&sc->dc_tick_tmo, hz); 2402 2403 splx(s); 2404 2405 return; 2406 } 2407 2408 int dc_intr(arg) 2409 void *arg; 2410 { 2411 struct dc_softc *sc; 2412 struct ifnet *ifp; 2413 u_int32_t status; 2414 int claimed = 0; 2415 2416 sc = arg; 2417 ifp = &sc->arpcom.ac_if; 2418 2419 /* Supress unwanted interrupts */ 2420 if (!(ifp->if_flags & IFF_UP)) { 2421 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 2422 dc_stop(sc); 2423 return claimed; 2424 } 2425 2426 /* Disable interrupts. */ 2427 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2428 2429 while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 2430 status != 0xFFFFFFFF) { 2431 2432 claimed = 1; 2433 2434 CSR_WRITE_4(sc, DC_ISR, status); 2435 if ((status & DC_INTRS) == 0) { 2436 claimed = 0; 2437 break; 2438 } 2439 2440 if (status & DC_ISR_RX_OK) { 2441 int curpkts; 2442 curpkts = ifp->if_ipackets; 2443 dc_rxeof(sc); 2444 if (curpkts == ifp->if_ipackets) { 2445 while(dc_rx_resync(sc)) 2446 dc_rxeof(sc); 2447 } 2448 } 2449 2450 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2451 dc_txeof(sc); 2452 2453 if (status & DC_ISR_TX_IDLE) { 2454 dc_txeof(sc); 2455 if (sc->dc_cdata.dc_tx_cnt) { 2456 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2457 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2458 } 2459 } 2460 2461 if (status & DC_ISR_TX_UNDERRUN) { 2462 u_int32_t cfg; 2463 2464 printf("dc%d: TX underrun -- ", sc->dc_unit); 2465 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) 2466 dc_init(sc); 2467 cfg = CSR_READ_4(sc, DC_NETCFG); 2468 cfg &= ~DC_NETCFG_TX_THRESH; 2469 if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) { 2470 printf("using store and forward mode\n"); 2471 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2472 } else if (sc->dc_flags & DC_TX_STORENFWD) { 2473 printf("resetting\n"); 2474 } else { 2475 sc->dc_txthresh += 0x4000; 2476 printf("increasing TX threshold\n"); 2477 CSR_WRITE_4(sc, DC_NETCFG, cfg); 2478 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2479 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2480 } 2481 } 2482 2483 if ((status & DC_ISR_RX_WATDOGTIMEO) 2484 || (status & DC_ISR_RX_NOBUF)) { 2485 int curpkts; 2486 curpkts = ifp->if_ipackets; 2487 dc_rxeof(sc); 2488 if (curpkts == ifp->if_ipackets) { 2489 while(dc_rx_resync(sc)) 2490 dc_rxeof(sc); 2491 } 2492 } 2493 2494 if (status & DC_ISR_BUS_ERR) { 2495 dc_reset(sc); 2496 dc_init(sc); 2497 } 2498 } 2499 2500 /* Re-enable interrupts. */ 2501 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2502 2503 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2504 dc_start(ifp); 2505 2506 return (claimed); 2507 } 2508 2509 /* 2510 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2511 * pointers to the fragment pointers. 2512 */ 2513 int dc_encap(sc, m_head, txidx) 2514 struct dc_softc *sc; 2515 struct mbuf *m_head; 2516 u_int32_t *txidx; 2517 { 2518 struct dc_desc *f = NULL; 2519 int frag, cur, cnt = 0, i; 2520 bus_dmamap_t map; 2521 2522 /* 2523 * Start packing the mbufs in this chain into 2524 * the fragment pointers. Stop when we run out 2525 * of fragments or hit the end of the mbuf chain. 
2526 */ 2527 map = sc->sc_tx_sparemap; 2528 2529 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 2530 m_head, BUS_DMA_NOWAIT) != 0) 2531 return (ENOBUFS); 2532 2533 cur = frag = *txidx; 2534 2535 for (i = 0; i < map->dm_nsegs; i++) { 2536 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 2537 if (*txidx != sc->dc_cdata.dc_tx_prod && 2538 frag == (DC_TX_LIST_CNT - 1)) { 2539 bus_dmamap_unload(sc->sc_dmat, map); 2540 return(ENOBUFS); 2541 } 2542 } 2543 if ((DC_TX_LIST_CNT - 2544 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) { 2545 bus_dmamap_unload(sc->sc_dmat, map); 2546 return(ENOBUFS); 2547 } 2548 2549 f = &sc->dc_ldata->dc_tx_list[frag]; 2550 f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len); 2551 if (cnt == 0) { 2552 f->dc_status = htole32(0); 2553 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 2554 } else 2555 f->dc_status = htole32(DC_TXSTAT_OWN); 2556 f->dc_data = htole32(map->dm_segs[i].ds_addr); 2557 cur = frag; 2558 DC_INC(frag, DC_TX_LIST_CNT); 2559 cnt++; 2560 } 2561 2562 sc->dc_cdata.dc_tx_cnt += cnt; 2563 sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head; 2564 sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map; 2565 sc->dc_cdata.dc_tx_chain[cur].sd_map = map; 2566 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 2567 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 2568 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= 2569 htole32(DC_TXCTL_FINT); 2570 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 2571 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2572 htole32(DC_TXCTL_FINT); 2573 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 2574 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2575 htole32(DC_TXCTL_FINT); 2576 #ifdef ALTQ 2577 else if ((sc->dc_flags & DC_TX_USE_TX_INTR) && 2578 TBR_IS_ENABLED(&sc->arpcom.ac_if.if_snd)) 2579 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= 2580 htole32(DC_TXCTL_FINT); 2581 #endif 2582 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2583 BUS_DMASYNC_PREWRITE); 2584 2585 sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN); 2586 2587 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 2588 offsetof(struct dc_list_data, dc_tx_list[0]), 2589 sizeof(struct dc_desc) * DC_TX_LIST_CNT, 2590 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2591 2592 *txidx = frag; 2593 2594 return(0); 2595 } 2596 2597 /* 2598 * Coalesce an mbuf chain into a single mbuf cluster buffer. 2599 * Needed for some really badly behaved chips that just can't 2600 * do scatter/gather correctly. 2601 */ 2602 int dc_coal(sc, m_head) 2603 struct dc_softc *sc; 2604 struct mbuf **m_head; 2605 { 2606 struct mbuf *m_new, *m; 2607 2608 m = *m_head; 2609 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 2610 if (m_new == NULL) { 2611 printf("dc%d: no memory for tx list", sc->dc_unit); 2612 return(ENOBUFS); 2613 } 2614 if (m->m_pkthdr.len > MHLEN) { 2615 MCLGET(m_new, M_DONTWAIT); 2616 if (!(m_new->m_flags & M_EXT)) { 2617 m_freem(m_new); 2618 printf("dc%d: no memory for tx list", sc->dc_unit); 2619 return(ENOBUFS); 2620 } 2621 } 2622 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 2623 m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; 2624 m_freem(m); 2625 *m_head = m_new; 2626 2627 return(0); 2628 } 2629 2630 /* 2631 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2632 * to the mbuf data regions directly in the transmit lists. We also save a 2633 * copy of the pointers since the transmit list fragment pointers are 2634 * physical addresses. 
2635 */ 2636 2637 void dc_start(ifp) 2638 struct ifnet *ifp; 2639 { 2640 struct dc_softc *sc; 2641 struct mbuf *m_head = NULL; 2642 int idx; 2643 2644 sc = ifp->if_softc; 2645 2646 if (!sc->dc_link) 2647 return; 2648 2649 if (ifp->if_flags & IFF_OACTIVE) 2650 return; 2651 2652 idx = sc->dc_cdata.dc_tx_prod; 2653 2654 while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) { 2655 IFQ_POLL(&ifp->if_snd, m_head); 2656 if (m_head == NULL) 2657 break; 2658 2659 if (sc->dc_flags & DC_TX_COALESCE) { 2660 #ifdef ALTQ 2661 /* note: dc_coal breaks the poll-and-dequeue rule. 2662 * if dc_coal fails, we lose the packet. 2663 */ 2664 #endif 2665 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2666 if (dc_coal(sc, &m_head)) { 2667 ifp->if_flags |= IFF_OACTIVE; 2668 break; 2669 } 2670 } 2671 2672 if (dc_encap(sc, m_head, &idx)) { 2673 ifp->if_flags |= IFF_OACTIVE; 2674 break; 2675 } 2676 2677 /* now we are committed to transmit the packet */ 2678 if (sc->dc_flags & DC_TX_COALESCE) { 2679 /* if mbuf is coalesced, it is already dequeued */ 2680 } else 2681 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2682 2683 /* 2684 * If there's a BPF listener, bounce a copy of this frame 2685 * to him. 2686 */ 2687 #if NBPFILTER > 0 2688 if (ifp->if_bpf) 2689 bpf_mtap(ifp->if_bpf, m_head); 2690 #endif 2691 if (sc->dc_flags & DC_TX_ONE) { 2692 ifp->if_flags |= IFF_OACTIVE; 2693 break; 2694 } 2695 } 2696 if (idx == sc->dc_cdata.dc_tx_prod) 2697 return; 2698 2699 /* Transmit */ 2700 sc->dc_cdata.dc_tx_prod = idx; 2701 if (!(sc->dc_flags & DC_TX_POLL)) 2702 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2703 2704 /* 2705 * Set a timeout in case the chip goes out to lunch. 2706 */ 2707 ifp->if_timer = 5; 2708 2709 return; 2710 } 2711 2712 void dc_init(xsc) 2713 void *xsc; 2714 { 2715 struct dc_softc *sc = xsc; 2716 struct ifnet *ifp = &sc->arpcom.ac_if; 2717 struct mii_data *mii; 2718 int s; 2719 2720 s = splimp(); 2721 2722 mii = &sc->sc_mii; 2723 2724 /* 2725 * Cancel pending I/O and free all RX/TX buffers. 2726 */ 2727 dc_stop(sc); 2728 dc_reset(sc); 2729 2730 /* 2731 * Set cache alignment and burst length. 2732 */ 2733 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 2734 CSR_WRITE_4(sc, DC_BUSCTL, 0); 2735 else 2736 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 2737 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 2738 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 2739 } else { 2740 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 2741 } 2742 if (sc->dc_flags & DC_TX_POLL) 2743 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 2744 switch(sc->dc_cachesize) { 2745 case 32: 2746 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 2747 break; 2748 case 16: 2749 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 2750 break; 2751 case 8: 2752 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 2753 break; 2754 case 0: 2755 default: 2756 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 2757 break; 2758 } 2759 2760 if (sc->dc_flags & DC_TX_STORENFWD) 2761 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2762 else { 2763 if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) { 2764 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2765 } else { 2766 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2767 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2768 } 2769 } 2770 2771 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 2772 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 2773 2774 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2775 /* 2776 * The app notes for the 98713 and 98715A say that 2777 * in order to have the chips operate properly, a magic 2778 * number must be written to CSR16. 
Macronix does not 2779 * document the meaning of these bits so there's no way 2780 * to know exactly what they do. The 98713 has a magic 2781 * number all its own; the rest all use a different one. 2782 */ 2783 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 2784 if (sc->dc_type == DC_TYPE_98713) 2785 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 2786 else 2787 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 2788 } 2789 2790 if (DC_IS_XIRCOM(sc)) { 2791 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2792 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2793 DELAY(10); 2794 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2795 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2796 DELAY(10); 2797 } 2798 2799 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2800 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_72BYTES); 2801 2802 /* Init circular RX list. */ 2803 if (dc_list_rx_init(sc) == ENOBUFS) { 2804 printf("dc%d: initialization failed: no " 2805 "memory for rx buffers\n", sc->dc_unit); 2806 dc_stop(sc); 2807 (void)splx(s); 2808 return; 2809 } 2810 2811 /* 2812 * Init tx descriptors. 2813 */ 2814 dc_list_tx_init(sc); 2815 2816 /* 2817 * Load the address of the RX list. 2818 */ 2819 CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2820 offsetof(struct dc_list_data, dc_rx_list[0])); 2821 CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 2822 offsetof(struct dc_list_data, dc_tx_list[0])); 2823 2824 /* 2825 * Enable interrupts. 2826 */ 2827 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2828 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 2829 2830 /* Enable transmitter. */ 2831 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2832 2833 /* 2834 * If this is an Intel 21143 and we're not using the 2835 * MII port, program the LED control pins so we get 2836 * link and activity indications. 2837 */ 2838 if (sc->dc_flags & DC_TULIP_LEDS) { 2839 CSR_WRITE_4(sc, DC_WATCHDOG, 2840 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 2841 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 2842 } 2843 2844 /* 2845 * Load the RX/multicast filter. We do this sort of late 2846 * because the filter programming scheme on the 21143 and 2847 * some clones requires DMAing a setup frame via the TX 2848 * engine, and we need the transmitter enabled for that. 2849 */ 2850 dc_setfilt(sc); 2851 2852 /* Enable receiver. */ 2853 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 2854 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 2855 2856 mii_mediachg(mii); 2857 dc_setcfg(sc, sc->dc_if_media); 2858 2859 ifp->if_flags |= IFF_RUNNING; 2860 ifp->if_flags &= ~IFF_OACTIVE; 2861 2862 (void)splx(s); 2863 2864 timeout_set(&sc->dc_tick_tmo, dc_tick, sc); 2865 2866 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 2867 sc->dc_link = 1; 2868 else { 2869 if (sc->dc_flags & DC_21143_NWAY) 2870 timeout_add(&sc->dc_tick_tmo, hz / 10); 2871 else 2872 timeout_add(&sc->dc_tick_tmo, hz); 2873 } 2874 2875 #ifdef SRM_MEDIA 2876 if(sc->dc_srm_media) { 2877 struct ifreq ifr; 2878 2879 ifr.ifr_media = sc->dc_srm_media; 2880 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 2881 sc->dc_srm_media = 0; 2882 } 2883 #endif 2884 2885 return; 2886 } 2887 2888 /* 2889 * Set media options. 
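 * For Davicom chips forced to HomePNA (IFM_HPNA_1) the MAC is programmed
 * directly via dc_setcfg(); for all other media the link flag is simply
 * cleared and dc_tick() re-establishes it once the PHY has finished
 * negotiating.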
2890 */ 2891 int dc_ifmedia_upd(ifp) 2892 struct ifnet *ifp; 2893 { 2894 struct dc_softc *sc; 2895 struct mii_data *mii; 2896 struct ifmedia *ifm; 2897 2898 sc = ifp->if_softc; 2899 mii = &sc->sc_mii; 2900 mii_mediachg(mii); 2901 2902 ifm = &mii->mii_media; 2903 2904 if (DC_IS_DAVICOM(sc) && 2905 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 2906 dc_setcfg(sc, ifm->ifm_media); 2907 else 2908 sc->dc_link = 0; 2909 2910 return(0); 2911 } 2912 2913 /* 2914 * Report current media status. 2915 */ 2916 void dc_ifmedia_sts(ifp, ifmr) 2917 struct ifnet *ifp; 2918 struct ifmediareq *ifmr; 2919 { 2920 struct dc_softc *sc; 2921 struct mii_data *mii; 2922 struct ifmedia *ifm; 2923 2924 sc = ifp->if_softc; 2925 mii = &sc->sc_mii; 2926 mii_pollstat(mii); 2927 ifm = &mii->mii_media; 2928 if (DC_IS_DAVICOM(sc)) { 2929 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 2930 ifmr->ifm_active = ifm->ifm_media; 2931 ifmr->ifm_status = 0; 2932 return; 2933 } 2934 } 2935 ifmr->ifm_active = mii->mii_media_active; 2936 ifmr->ifm_status = mii->mii_media_status; 2937 2938 return; 2939 } 2940 2941 int dc_ioctl(ifp, command, data) 2942 struct ifnet *ifp; 2943 u_long command; 2944 caddr_t data; 2945 { 2946 struct dc_softc *sc = ifp->if_softc; 2947 struct ifreq *ifr = (struct ifreq *) data; 2948 struct ifaddr *ifa = (struct ifaddr *)data; 2949 struct mii_data *mii; 2950 int s, error = 0; 2951 2952 s = splimp(); 2953 2954 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 2955 splx(s); 2956 return error; 2957 } 2958 2959 switch(command) { 2960 case SIOCSIFADDR: 2961 ifp->if_flags |= IFF_UP; 2962 switch (ifa->ifa_addr->sa_family) { 2963 case AF_INET: 2964 dc_init(sc); 2965 arp_ifinit(&sc->arpcom, ifa); 2966 break; 2967 default: 2968 dc_init(sc); 2969 break; 2970 } 2971 break; 2972 case SIOCSIFFLAGS: 2973 if (ifp->if_flags & IFF_UP) { 2974 if (ifp->if_flags & IFF_RUNNING && 2975 ifp->if_flags & IFF_PROMISC && 2976 !(sc->dc_if_flags & IFF_PROMISC)) { 2977 dc_setfilt(sc); 2978 } else if (ifp->if_flags & IFF_RUNNING && 2979 !(ifp->if_flags & IFF_PROMISC) && 2980 sc->dc_if_flags & IFF_PROMISC) { 2981 dc_setfilt(sc); 2982 } else if (!(ifp->if_flags & IFF_RUNNING)) { 2983 sc->dc_txthresh = 0; 2984 dc_init(sc); 2985 } 2986 } else { 2987 if (ifp->if_flags & IFF_RUNNING) 2988 dc_stop(sc); 2989 } 2990 sc->dc_if_flags = ifp->if_flags; 2991 error = 0; 2992 break; 2993 case SIOCADDMULTI: 2994 case SIOCDELMULTI: 2995 error = (command == SIOCADDMULTI) ? 2996 ether_addmulti(ifr, &sc->arpcom) : 2997 ether_delmulti(ifr, &sc->arpcom); 2998 2999 if (error == ENETRESET) { 3000 /* 3001 * Multicast list has changed; set the hardware 3002 * filter accordingly. 3003 */ 3004 dc_setfilt(sc); 3005 error = 0; 3006 } 3007 break; 3008 case SIOCGIFMEDIA: 3009 case SIOCSIFMEDIA: 3010 mii = &sc->sc_mii; 3011 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3012 #ifdef SRM_MEDIA 3013 if (sc->dc_srm_media) 3014 sc->dc_srm_media = 0; 3015 #endif 3016 break; 3017 default: 3018 error = EINVAL; 3019 break; 3020 } 3021 3022 (void)splx(s); 3023 3024 return(error); 3025 } 3026 3027 void dc_watchdog(ifp) 3028 struct ifnet *ifp; 3029 { 3030 struct dc_softc *sc; 3031 3032 sc = ifp->if_softc; 3033 3034 ifp->if_oerrors++; 3035 printf("dc%d: watchdog timeout\n", sc->dc_unit); 3036 3037 dc_stop(sc); 3038 dc_reset(sc); 3039 dc_init(sc); 3040 3041 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) 3042 dc_start(ifp); 3043 3044 return; 3045 } 3046 3047 /* 3048 * Stop the adapter and free any mbufs allocated to the 3049 * RX and TX lists. 
3050 */ 3051 void dc_stop(sc) 3052 struct dc_softc *sc; 3053 { 3054 register int i; 3055 struct ifnet *ifp; 3056 3057 ifp = &sc->arpcom.ac_if; 3058 ifp->if_timer = 0; 3059 3060 timeout_del(&sc->dc_tick_tmo); 3061 3062 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3063 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3064 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3065 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3066 sc->dc_link = 0; 3067 3068 /* 3069 * Free data in the RX lists. 3070 */ 3071 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3072 if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) { 3073 bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map; 3074 3075 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3076 BUS_DMASYNC_POSTREAD); 3077 bus_dmamap_unload(sc->sc_dmat, map); 3078 } 3079 if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) { 3080 m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf); 3081 sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL; 3082 } 3083 } 3084 bzero((char *)&sc->dc_ldata->dc_rx_list, 3085 sizeof(sc->dc_ldata->dc_rx_list)); 3086 3087 /* 3088 * Free the TX list buffers. 3089 */ 3090 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3091 if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) { 3092 bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map; 3093 3094 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 3095 BUS_DMASYNC_POSTWRITE); 3096 bus_dmamap_unload(sc->sc_dmat, map); 3097 } 3098 if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) { 3099 if (sc->dc_ldata->dc_tx_list[i].dc_ctl & 3100 htole32(DC_TXCTL_SETUP)) { 3101 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL; 3102 continue; 3103 } 3104 m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf); 3105 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL; 3106 } 3107 } 3108 3109 bzero((char *)&sc->dc_ldata->dc_tx_list, 3110 sizeof(sc->dc_ldata->dc_tx_list)); 3111 3112 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3113 3114 return; 3115 } 3116 3117 /* 3118 * Stop all chip I/O so that the kernel's probe routines don't 3119 * get confused by errant DMAs when rebooting. 3120 */ 3121 void dc_shutdown(v) 3122 void *v; 3123 { 3124 struct dc_softc *sc = (struct dc_softc *)v; 3125 3126 dc_stop(sc); 3127 } 3128 3129 struct cfdriver dc_cd = { 3130 0, "dc", DV_IFNET 3131 }; 3132 3133