/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_dc.c,v 1.9.2.45 2003/06/08 14:31:53 mux Exp $
 * $DragonFly: src/sys/dev/netif/dc/if_dc.c,v 1.42 2005/09/08 10:26:20 sephe Exp $
 */

/*
 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN985 (www.admtek.com.tw)
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217 (www.accton.com)
 * Conexant LANfinity (www.conexant.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Intel 21143 is the successor to the DEC 21140. It is basically
 * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
 *
 * o MII port, for 10Mbps and 100Mbps support and NWAY
 *   autonegotiation provided by an external PHY.
 * o SYM port, for symbol mode 100Mbps support.
 * o 10baseT port.
 * o AUI/BNC port.
 *
 * The 100Mbps SYM port and 10baseT port can be used together in
 * combination with the internal NWAY support to create a 10/100
 * autosensing configuration.
 *
 * Note that not all tulip workalikes are handled in this driver: we only
 * deal with those which are relatively well behaved. The Winbond is
 * handled separately due to its different register offsets and the
 * special handling needed for its various bugs. The PNIC is handled
 * here, but I'm not thrilled about it.
 *
 * All of the workalike chips use some form of MII transceiver support
 * with the exception of the Macronix chips, which also have a SYM port.
 * The ASIX AX88140A is also documented to have a SYM port, but all
 * the cards I've seen use an MII transceiver, probably because the
 * AX88140A doesn't support internal NWAY.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define DC_USEIOSPACE

#include "if_dcreg.h"

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static const struct dc_type dc_devs[] = {
	{ DC_VENDORID_DEC, DC_DEVICEID_21143,
		"Intel 21143 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009,
		"Davicom DM9009 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100,
		"Davicom DM9100 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102A 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AL981,
		"ADMtek AL981 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AN985,
		"ADMtek AN985 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511,
		"ADMtek ADM9511 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513,
		"ADMtek ADM9513 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88140A 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88141 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713A 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715/98715A 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98725 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98727,
		"Macronix 98727/98732 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C115,
		"LC82C115 PNIC II 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c168 PNIC 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c169 PNIC 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN1217,
		"Accton EN1217 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN2242,
		"Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112,
		"Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB,
		"3Com OfficeConnect 10/100B" },
	{ 0, 0, NULL }
};

static int	dc_probe(device_t);
static int	dc_attach(device_t);
static int	dc_detach(device_t);
static int	dc_suspend(device_t);
static int	dc_resume(device_t);
static void	dc_acpi(device_t);
static const struct dc_type *dc_devtype(device_t);
static int	dc_newbuf(struct dc_softc *, int, struct mbuf *);
static int	dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
static void	dc_pnic_rx_bug_war(struct dc_softc *, int);
static int	dc_rx_resync(struct dc_softc *);
static void	dc_rxeof(struct dc_softc *);
static void	dc_txeof(struct dc_softc *);
static void	dc_tick(void *);
static void	dc_tx_underrun(struct dc_softc *);
static void	dc_intr(void *);
static void	dc_start(struct ifnet *);
static int	dc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
#ifdef DEVICE_POLLING
static void	dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	dc_init(void *);
static void	dc_stop(struct dc_softc *);
static void	dc_watchdog(struct ifnet *);
static void	dc_shutdown(device_t);
static int	dc_ifmedia_upd(struct ifnet *);
static void	dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	dc_delay(struct dc_softc *);
static void	dc_eeprom_idle(struct dc_softc *);
static void	dc_eeprom_putbyte(struct dc_softc *, int);
static void	dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
static void	dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
static void	dc_eeprom_width(struct dc_softc *);
static void	dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);

static void	dc_mii_writebit(struct dc_softc *, int);
static int	dc_mii_readbit(struct dc_softc *);
static void	dc_mii_sync(struct dc_softc *);
static void	dc_mii_send(struct dc_softc *, u_int32_t, int);
static int	dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
static int	dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
static int	dc_miibus_readreg(device_t, int, int);
static int	dc_miibus_writereg(device_t, int, int, int);
static void	dc_miibus_statchg(device_t);
static void	dc_miibus_mediainit(device_t);

static u_int32_t dc_crc_mask(struct dc_softc *);
static void	dc_setcfg(struct dc_softc *, int);
static void	dc_setfilt_21143(struct dc_softc *);
static void	dc_setfilt_asix(struct dc_softc *);
static void	dc_setfilt_admtek(struct dc_softc *);

static void	dc_setfilt(struct dc_softc *);

static void	dc_reset(struct dc_softc *);
static int	dc_list_rx_init(struct dc_softc *);
static int	dc_list_tx_init(struct dc_softc *);

static void	dc_read_srom(struct dc_softc *, int);
static void	dc_parse_21143_srom(struct dc_softc *);
static void	dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
static void	dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
static void	dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
static void	dc_apply_fixup(struct dc_softc *, int);

#ifdef DC_USEIOSPACE
#define DC_RES			SYS_RES_IOPORT
#define DC_RID			DC_PCI_CFBIO
#else
#define DC_RES			SYS_RES_MEMORY
#define DC_RID			DC_PCI_CFBMA
#endif

static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	{ 0, 0 }
};

static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};

static devclass_t dc_devclass;

#ifdef __i386__
static int dc_quick = 1;
SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0,
    "do not mdevget in dc driver");
#endif

DECLARE_DUMMY_MODULE(if_dc);
DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0);
DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0);

#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
static void
dc_delay(struct dc_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, DC_BUSCTL);
}

static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}

static void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	return;
}
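
/*
 * A quick summary of the serial EEPROM access protocol as implemented
 * here (this is a reading of the code above and below, not a quote from
 * any datasheet): the SROM is treated as a MicroWire-style part.
 * dc_eeprom_putbyte() clocks out the 3-bit read opcode
 * (DC_EECMD_READ >> 6) followed by sc->dc_romwidth address bits, MSB
 * first; dc_eeprom_getword() then clocks the 16 data bits back in.
 * dc_eeprom_width() guesses the address width by clocking out the read
 * opcode followed by zero address bits and counting how many clocks it
 * takes for the part to pull EE_DATAOUT low, falling back to a width of
 * 6 bits if that never happens within range.
 */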

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
static void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */
/*
 * Write a bit to the MII bus.
 */
static void
dc_mii_writebit(struct dc_softc *sc, int bit)
{
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);

	return;
}

/*
 * Read a bit from the MII bus.
 */
static int
dc_mii_readbit(struct dc_softc *sc)
{
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return(1);

	return(0);
}

/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
 */
static void
dc_mii_sync(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void
dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}
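
/*
 * The bit-bang routines above are combined into MII management frames
 * below. For reference (this describes what the code sends, which
 * matches the usual IEEE 802.3 clause 22 layout): a 32-bit preamble of
 * ones (dc_mii_sync()), a 2-bit start delimiter, a 2-bit opcode (read
 * or write), a 5-bit PHY address, a 5-bit register address, a 2-bit
 * turnaround, and 16 data bits -- driven by us on writes, sampled from
 * the PHY on reads.
 */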

/*
 * Read a PHY register through the MII.
 */
static int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int ack, i;

	crit_enter();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	crit_exit();

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	crit_enter();

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	crit_exit();

	return(0);
}

static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN985 have internal PHYs, however the
	 * AL981 provides direct access to the PHY registers while the
	 * AN985 uses a serial MII interface. The AN985's MII interface
	 * is also buggy in that you can read from any MII address
	 * (0 to 31), but only address 1 behaves normally. To deal with
	 * both cases, we pretend that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return(0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at MII
	 * address 0 (possibly HomePNA?) and 1 (ethernet), so we only
	 * respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return(0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return(BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return(DC_VENDORID_LO);
				return(DC_VENDORID_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return(DC_DEVICEID_82C168);
				return(DC_DEVICEID_21143);
				break;
			default:
				return(0);
				break;
			}
		} else
			return(0);
	}

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return(rval == 0xFFFF ? 0 : rval);
			}
		}
		return(0);
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			if_printf(&sc->arpcom.ac_if,
			    "phy_read: bad phy register %x\n", reg);
			return(0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return(0);
		return(rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return(frame.mii_data);
}

static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct dc_softc *sc;
	struct dc_mii_frame frame;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return(0);

	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return(0);

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return(0);
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			if_printf(&sc->arpcom.ac_if,
			    "phy_write: bad phy register %x\n", reg);
			return(0);
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return(0);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return(0);
}

static void
dc_miibus_statchg(device_t dev)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	if (DC_IS_ADMTEK(sc))
		return;

	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}

	return;
}

/*
 * Special support for DM9102A cards with HomePNA PHYs. Note:
 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
 * to be impossible to talk to the management interface of the DM9801
 * PHY (its MDIO pin is not connected to anything). Consequently,
 * the driver has to just 'know' about the additional mode and deal
 * with it itself. *sigh*
 */
static void
dc_miibus_mediainit(device_t dev)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int rev;

	rev = pci_get_revid(dev);

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;

	if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
		ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL);

	return;
}

#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

static u_int32_t
dc_crc_mask(struct dc_softc *sc)
{
	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return ((1 << DC_BITS_128) - 1);

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return ((1 << DC_BITS_64) - 1);

	return ((1 << DC_BITS_512) - 1);
}
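
/*
 * To make the hash arithmetic below concrete (an illustrative example,
 * not taken from any datasheet): on a chip with the full 512-bit table,
 * dc_crc_mask() returns 0x1FF. A multicast address whose little-endian
 * CRC32 masks down to h = 0x1A3 therefore sets bit (h & 0xF) = 3 in
 * setup-frame word sp[h >> 4] = sp[26]. On the 128- and 64-bit parts
 * the same formula applies, just with a 7- or 6-bit mask.
 */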

/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, crc_mask, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
	bzero((char *)sp, DC_SFRAME_LEN);

	sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
	sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
	    DC_FILTER_HASHPERF | DC_TXCTL_FINT;

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	crc_mask = dc_crc_mask(sc);
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & crc_mask;
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = ether_crc32_le(ifp->if_broadcastaddr,
		    ETHER_ADDR_LEN) & crc_mask;
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	/* Set our MAC address */
	sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
	sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
	sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

	sframe->dc_status = DC_TXSTAT_OWN;
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;

	return;
}

void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t crc_mask;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	if (DC_IS_CENTAUR(sc))
		crc_mask = dc_crc_mask(sc);
	else
		crc_mask = 0x3f;
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (DC_IS_CENTAUR(sc)) {
			h = ether_crc32_le(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN) & crc_mask;
		} else {
			h = ether_crc32_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN);
			h = (h >> 26) & crc_mask;
		}
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);

	return;
}

void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN);
		h = (h >> 26) & 0x3f;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

	return;
}

static void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	return;
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE) &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
			    "failed to force tx and rx to idle state\n");
		}
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			/* there's a write enable bit here that reads as 1 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

static const struct dc_type *
dc_devtype(device_t dev)
{
	const struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_get_revid(dev);
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			return(t);
		}
		t++;
	}

	return(NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	const struct dc_type *t;

	t = dc_devtype(dev);
	if (t != NULL) {
		struct dc_softc *sc = device_get_softc(dev);

		/* Need this info to decide on a chip type. */
		sc->dc_info = t;
		device_set_desc(dev, t->dc_name);
		return(0);
	}

	return(ENXIO);
}

static void
dc_acpi(device_t dev)
{
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;
		struct dc_softc *sc;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		sc = device_get_softc(dev);
		/* Reset the power state. */
		if_printf(&sc->arpcom.ac_if,
		    "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}
}

static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

static void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO);
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;

	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T|IFM_FDX;
		break;

	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;

	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	}
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

static void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

static void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	u_int8_t *p;
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO);
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return;
}

static void
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_INTWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int i, loff;
	char *ptr;
	int have_mii;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Check whether we have an MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}
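
/*
 * A note on the SROM layout the parser above relies on (inferred from
 * the code rather than quoted from the 21143 SROM spec): byte 27 of the
 * SROM holds the offset of the info leaf, the leaf header's dc_mcnt
 * field gives the number of media blocks that follow, and each block
 * starts with a length/type header whose low 7 bits of dc_len give the
 * block length. MII blocks carry the GPIO and reset sequences that
 * dc_apply_fixup() later writes out through the watchdog/GPIO register;
 * SIA and SYM blocks are only used when no MII block is present.
 */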

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(device_t dev)
{
	int tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t revision;
	int error = 0, rid, mac_offset;

	sc = device_get_softc(dev);
	callout_init(&sc->dc_stat_timer);

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	revision = pci_get_revid(dev);

	/* Get the eeprom width, but PNIC has diff eeprom */
	if (sc->dc_info->dc_did != DC_DEVICEID_82C168)
		dc_eeprom_width(sc);

	switch(sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_DM9009:
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_ADM9511:
	case DC_DEVICEID_ADM9513:
	case DC_DEVICEID_EN2242:
	case DC_DEVICEID_3CSOHOB:
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_WAITOK);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_RS7112:
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	default:
		device_printf(dev, "unknown device: %x\n",
		    sc->dc_info->dc_did);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_read_config(dev,
		    DC_PCI_CFLT, 4) & 0xFF;

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc))
		dc_parse_21143_srom(sc);
	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN985:
		*(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0);
		*(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1);
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 6);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->dc_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->dc_ldata, sizeof(struct dc_list_data));

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = dc_poll;
#endif
	ifp->if_watchdog = dc_watchdog;
	ifp->if_init = dc_init;
	ifp->if_baudrate = 10000000;
	ifq_set_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	error = mii_phy_probe(dev, &sc->dc_miibus,
	    dc_ifmedia_upd, dc_ifmedia_sts);

	if (error && DC_IS_INTEL(sc)) {
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		mii_phy_probe(dev, &sc->dc_miibus,
		    dc_ifmedia_upd, dc_ifmedia_sts);
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
			sc->dc_flags |= DC_TULIP_LEDS;
		error = 0;
	}

	if (error) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
2042 */ 2043 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2044 2045 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET, 2046 dc_intr, sc, &sc->dc_intrhand, NULL); 2047 if (error) { 2048 ether_ifdetach(ifp); 2049 device_printf(dev, "couldn't set up irq\n"); 2050 goto fail; 2051 } 2052 2053 return(0); 2054 2055 fail: 2056 dc_detach(dev); 2057 return(error); 2058 } 2059 2060 static int 2061 dc_detach(device_t dev) 2062 { 2063 struct dc_softc *sc = device_get_softc(dev); 2064 struct ifnet *ifp = &sc->arpcom.ac_if; 2065 struct dc_mediainfo *m; 2066 2067 crit_enter(); 2068 2069 if (device_is_attached(dev)) { 2070 dc_stop(sc); 2071 ether_ifdetach(ifp); 2072 } 2073 2074 if (sc->dc_miibus) 2075 device_delete_child(dev, sc->dc_miibus); 2076 bus_generic_detach(dev); 2077 2078 if (sc->dc_intrhand) 2079 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2080 2081 crit_exit(); 2082 2083 if (sc->dc_irq) 2084 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2085 if (sc->dc_res) 2086 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2087 2088 if (sc->dc_ldata) 2089 contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF); 2090 if (sc->dc_pnic_rx_buf != NULL) 2091 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2092 2093 while(sc->dc_mi != NULL) { 2094 m = sc->dc_mi->dc_next; 2095 free(sc->dc_mi, M_DEVBUF); 2096 sc->dc_mi = m; 2097 } 2098 2099 if (sc->dc_srom) 2100 free(sc->dc_srom, M_DEVBUF); 2101 2102 return(0); 2103 } 2104 2105 /* 2106 * Initialize the transmit descriptors. 2107 */ 2108 static int 2109 dc_list_tx_init(struct dc_softc *sc) 2110 { 2111 struct dc_chain_data *cd; 2112 struct dc_list_data *ld; 2113 int i; 2114 2115 cd = &sc->dc_cdata; 2116 ld = sc->dc_ldata; 2117 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2118 if (i == (DC_TX_LIST_CNT - 1)) { 2119 ld->dc_tx_list[i].dc_next = 2120 vtophys(&ld->dc_tx_list[0]); 2121 } else { 2122 ld->dc_tx_list[i].dc_next = 2123 vtophys(&ld->dc_tx_list[i + 1]); 2124 } 2125 cd->dc_tx_chain[i] = NULL; 2126 ld->dc_tx_list[i].dc_data = 0; 2127 ld->dc_tx_list[i].dc_ctl = 0; 2128 } 2129 2130 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2131 2132 return(0); 2133 } 2134 2135 2136 /* 2137 * Initialize the RX descriptors and allocate mbufs for them. Note that 2138 * we arrange the descriptors in a closed ring, so that the last descriptor 2139 * points back to the first. 2140 */ 2141 static int 2142 dc_list_rx_init(struct dc_softc *sc) 2143 { 2144 struct dc_chain_data *cd; 2145 struct dc_list_data *ld; 2146 int i; 2147 2148 cd = &sc->dc_cdata; 2149 ld = sc->dc_ldata; 2150 2151 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2152 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 2153 return(ENOBUFS); 2154 if (i == (DC_RX_LIST_CNT - 1)) { 2155 ld->dc_rx_list[i].dc_next = 2156 vtophys(&ld->dc_rx_list[0]); 2157 } else { 2158 ld->dc_rx_list[i].dc_next = 2159 vtophys(&ld->dc_rx_list[i + 1]); 2160 } 2161 } 2162 2163 cd->dc_rx_prod = 0; 2164 2165 return(0); 2166 } 2167 2168 /* 2169 * Initialize an RX descriptor and attach an MBUF cluster. 
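 *
 * Roughly, dc_newbuf() either allocates a fresh cluster or recycles the
 * one the caller hands back, trims sizeof(u_int64_t) bytes off the front
 * with m_adj(), points the descriptor's data field at the buffer's
 * physical address and returns ownership to the chip by setting
 * DC_RXSTAT_OWN. When the PNIC workaround is active the buffer is also
 * zeroed first, so the salvage code below can rely on finding zeros
 * beyond the end of the frame.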
2170 */ 2171 static int 2172 dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m) 2173 { 2174 struct mbuf *m_new = NULL; 2175 struct dc_desc *c; 2176 2177 c = &sc->dc_ldata->dc_rx_list[i]; 2178 2179 if (m == NULL) { 2180 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 2181 if (m_new == NULL) 2182 return (ENOBUFS); 2183 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2184 } else { 2185 m_new = m; 2186 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2187 m_new->m_data = m_new->m_ext.ext_buf; 2188 } 2189 2190 m_adj(m_new, sizeof(u_int64_t)); 2191 2192 /* 2193 * If this is a PNIC chip, zero the buffer. This is part 2194 * of the workaround for the receive bug in the 82c168 and 2195 * 82c169 chips. 2196 */ 2197 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2198 bzero((char *)mtod(m_new, char *), m_new->m_len); 2199 2200 sc->dc_cdata.dc_rx_chain[i] = m_new; 2201 c->dc_data = vtophys(mtod(m_new, caddr_t)); 2202 c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; 2203 c->dc_status = DC_RXSTAT_OWN; 2204 2205 return(0); 2206 } 2207 2208 /* 2209 * Grrrrr. 2210 * The PNIC chip has a terrible bug in it that manifests itself during 2211 * periods of heavy activity. The exact mode of failure is difficult to 2212 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2213 * will happen on slow machines. The bug is that sometimes instead of 2214 * uploading one complete frame during reception, it uploads what looks 2215 * like the entire contents of its FIFO memory. The frame we want is at 2216 * the end of the whole mess, but we never know exactly how much data has 2217 * been uploaded, so salvaging the frame is hard. 2218 * 2219 * There is only one way to do it reliably, and it's disgusting. 2220 * Here's what we know: 2221 * 2222 * - We know there will always be somewhere between one and three extra 2223 * descriptors uploaded. 2224 * 2225 * - We know the desired received frame will always be at the end of the 2226 * total data upload. 2227 * 2228 * - We know the size of the desired received frame because it will be 2229 * provided in the length field of the status word in the last descriptor. 2230 * 2231 * Here's what we do: 2232 * 2233 * - When we allocate buffers for the receive ring, we bzero() them. 2234 * This means that we know that the buffer contents should be all 2235 * zeros, except for data uploaded by the chip. 2236 * 2237 * - We also force the PNIC chip to upload frames that include the 2238 * ethernet CRC at the end. 2239 * 2240 * - We gather all of the bogus frame data into a single buffer. 2241 * 2242 * - We then position a pointer at the end of this buffer and scan 2243 * backwards until we encounter the first non-zero byte of data. 2244 * This is the end of the received frame. We know we will encounter 2245 * some data at the end of the frame because the CRC will always be 2246 * there, so even if the sender transmits a packet of all zeros, 2247 * we won't be fooled. 2248 * 2249 * - We know the size of the actual received frame, so we subtract 2250 * that value from the current pointer location. This brings us 2251 * to the start of the actual received packet. 2252 * 2253 * - We copy this into an mbuf and pass it on, along with the actual 2254 * frame length. 2255 * 2256 * The performance hit is tremendous, but it beats dropping frames all 2257 * the time.
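 *
 * As a rough illustration (the numbers are invented): if the chip dumps
 * two bogus buffers followed by the real 64-byte frame, we concatenate
 * three DC_RXLEN-sized chunks into dc_pnic_rx_buf, scan backwards from
 * the end of that buffer until we hit the last non-zero byte (the
 * appended CRC guarantees there is one), then step back 64 bytes to the
 * start of the frame and copy it into the final mbuf.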
2258 */ 2259 2260 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) 2261 static void 2262 dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2263 { 2264 struct dc_desc *cur_rx; 2265 struct dc_desc *c = NULL; 2266 struct mbuf *m = NULL; 2267 unsigned char *ptr; 2268 int i, total_len; 2269 u_int32_t rxstat = 0; 2270 2271 i = sc->dc_pnic_rx_bug_save; 2272 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2273 ptr = sc->dc_pnic_rx_buf; 2274 bzero(ptr, DC_RXLEN * 5); 2275 2276 /* Copy all the bytes from the bogus buffers. */ 2277 while (1) { 2278 c = &sc->dc_ldata->dc_rx_list[i]; 2279 rxstat = c->dc_status; 2280 m = sc->dc_cdata.dc_rx_chain[i]; 2281 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2282 ptr += DC_RXLEN; 2283 /* If this is the last buffer, break out. */ 2284 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2285 break; 2286 dc_newbuf(sc, i, m); 2287 DC_INC(i, DC_RX_LIST_CNT); 2288 } 2289 2290 /* Find the length of the actual receive frame. */ 2291 total_len = DC_RXBYTES(rxstat); 2292 2293 /* Scan backwards until we hit a non-zero byte. */ 2294 while(*ptr == 0x00) 2295 ptr--; 2296 2297 /* Round off. */ 2298 if ((uintptr_t)(ptr) & 0x3) 2299 ptr -= 1; 2300 2301 /* Now find the start of the frame. */ 2302 ptr -= total_len; 2303 if (ptr < sc->dc_pnic_rx_buf) 2304 ptr = sc->dc_pnic_rx_buf; 2305 2306 /* 2307 * Now copy the salvaged frame to the last mbuf and fake up 2308 * the status word to make it look like a successful 2309 * frame reception. 2310 */ 2311 dc_newbuf(sc, i, m); 2312 bcopy(ptr, mtod(m, char *), total_len); 2313 cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; 2314 2315 return; 2316 } 2317 2318 /* 2319 * This routine searches the RX ring for dirty descriptors in the 2320 * event that the rxeof routine falls out of sync with the chip's 2321 * current descriptor pointer. This may happen sometimes as a result 2322 * of a "no RX buffer available" condition that happens when the chip 2323 * consumes all of the RX buffers before the driver has a chance to 2324 * process the RX ring. This routine may need to be called more than 2325 * once to bring the driver back in sync with the chip; however, we 2326 * should still be getting RX DONE interrupts to drive the search 2327 * for new packets in the RX ring, so we should catch up eventually. 2328 */ 2329 static int 2330 dc_rx_resync(struct dc_softc *sc) 2331 { 2332 int i, pos; 2333 struct dc_desc *cur_rx; 2334 2335 pos = sc->dc_cdata.dc_rx_prod; 2336 2337 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2338 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2339 if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) 2340 break; 2341 DC_INC(pos, DC_RX_LIST_CNT); 2342 } 2343 2344 /* If the ring really is empty, then just return. */ 2345 if (i == DC_RX_LIST_CNT) 2346 return(0); 2347 2348 /* We've fallen behind the chip: catch it. */ 2349 sc->dc_cdata.dc_rx_prod = pos; 2350 2351 return(EAGAIN); 2352 } 2353 2354 /* 2355 * A frame has been uploaded: pass the resulting mbuf chain up to 2356 * the higher level protocols.
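 *
 * The loop below starts at dc_rx_prod and runs for as long as the OWN
 * bit is clear, applying the PNIC salvage routine when it sees a partial
 * frame, counting errors, and on x86 optionally handing the filled
 * cluster straight up the stack (the dc_quick path) instead of copying
 * it out with m_devget().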
2357 */ 2358 static void 2359 dc_rxeof(struct dc_softc *sc) 2360 { 2361 struct mbuf *m; 2362 struct ifnet *ifp; 2363 struct dc_desc *cur_rx; 2364 int i, total_len = 0; 2365 u_int32_t rxstat; 2366 2367 ifp = &sc->arpcom.ac_if; 2368 i = sc->dc_cdata.dc_rx_prod; 2369 2370 while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { 2371 2372 #ifdef DEVICE_POLLING 2373 if (ifp->if_flags & IFF_POLLING) { 2374 if (sc->rxcycles <= 0) 2375 break; 2376 sc->rxcycles--; 2377 } 2378 #endif /* DEVICE_POLLING */ 2379 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2380 rxstat = cur_rx->dc_status; 2381 m = sc->dc_cdata.dc_rx_chain[i]; 2382 total_len = DC_RXBYTES(rxstat); 2383 2384 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2385 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2386 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2387 sc->dc_pnic_rx_bug_save = i; 2388 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2389 DC_INC(i, DC_RX_LIST_CNT); 2390 continue; 2391 } 2392 dc_pnic_rx_bug_war(sc, i); 2393 rxstat = cur_rx->dc_status; 2394 total_len = DC_RXBYTES(rxstat); 2395 } 2396 } 2397 2398 sc->dc_cdata.dc_rx_chain[i] = NULL; 2399 2400 /* 2401 * If an error occurs, update stats, clear the 2402 * status word and leave the mbuf cluster in place: 2403 * it should simply get re-used next time this descriptor 2404 * comes up in the ring. However, don't report long 2405 * frames as errors since they could be vlans 2406 */ 2407 if ((rxstat & DC_RXSTAT_RXERR)){ 2408 if (!(rxstat & DC_RXSTAT_GIANT) || 2409 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2410 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2411 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2412 ifp->if_ierrors++; 2413 if (rxstat & DC_RXSTAT_COLLSEEN) 2414 ifp->if_collisions++; 2415 dc_newbuf(sc, i, m); 2416 if (rxstat & DC_RXSTAT_CRCERR) { 2417 DC_INC(i, DC_RX_LIST_CNT); 2418 continue; 2419 } else { 2420 dc_init(sc); 2421 return; 2422 } 2423 } 2424 } 2425 2426 /* No errors; receive the packet. */ 2427 total_len -= ETHER_CRC_LEN; 2428 2429 #ifdef __i386__ 2430 /* 2431 * On the x86 we do not have alignment problems, so try to 2432 * allocate a new buffer for the receive ring, and pass up 2433 * the one where the packet is already, saving the expensive 2434 * copy done in m_devget(). 2435 * If we are on an architecture with alignment problems, or 2436 * if the allocation fails, then use m_devget and leave the 2437 * existing buffer in the receive ring. 2438 */ 2439 if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { 2440 m->m_pkthdr.rcvif = ifp; 2441 m->m_pkthdr.len = m->m_len = total_len; 2442 DC_INC(i, DC_RX_LIST_CNT); 2443 } else 2444 #endif 2445 { 2446 struct mbuf *m0; 2447 2448 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 2449 total_len + ETHER_ALIGN, 0, ifp, NULL); 2450 dc_newbuf(sc, i, m); 2451 DC_INC(i, DC_RX_LIST_CNT); 2452 if (m0 == NULL) { 2453 ifp->if_ierrors++; 2454 continue; 2455 } 2456 m_adj(m0, ETHER_ALIGN); 2457 m = m0; 2458 } 2459 2460 ifp->if_ipackets++; 2461 (*ifp->if_input)(ifp, m); 2462 } 2463 2464 sc->dc_cdata.dc_rx_prod = i; 2465 } 2466 2467 /* 2468 * A frame was downloaded to the chip. It's safe for us to clean up 2469 * the list buffers. 2470 */ 2471 2472 static void 2473 dc_txeof(struct dc_softc *sc) 2474 { 2475 struct dc_desc *cur_tx = NULL; 2476 struct ifnet *ifp; 2477 int idx; 2478 2479 ifp = &sc->arpcom.ac_if; 2480 2481 /* 2482 * Go through our tx list and free mbufs for those 2483 * frames that have been transmitted. 
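 *
 * Reclaiming stops at the first descriptor the chip still owns. Setup
 * frames and intermediate fragments carry no mbuf, so they are simply
 * counted down; the PNIC additionally needs the RX filter setup frame
 * re-sent if it reports an error on it, and some chips set bogus
 * carrier-lost bits that have to be masked before the error summary can
 * be trusted.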
2484 */ 2485 idx = sc->dc_cdata.dc_tx_cons; 2486 while(idx != sc->dc_cdata.dc_tx_prod) { 2487 u_int32_t txstat; 2488 2489 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2490 txstat = cur_tx->dc_status; 2491 2492 if (txstat & DC_TXSTAT_OWN) 2493 break; 2494 2495 if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || 2496 cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2497 if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2498 /* 2499 * Yes, the PNIC is so brain damaged 2500 * that it will sometimes generate a TX 2501 * underrun error while DMAing the RX 2502 * filter setup frame. If we detect this, 2503 * we have to send the setup frame again, 2504 * or else the filter won't be programmed 2505 * correctly. 2506 */ 2507 if (DC_IS_PNIC(sc)) { 2508 if (txstat & DC_TXSTAT_ERRSUM) 2509 dc_setfilt(sc); 2510 } 2511 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2512 } 2513 sc->dc_cdata.dc_tx_cnt--; 2514 DC_INC(idx, DC_TX_LIST_CNT); 2515 continue; 2516 } 2517 2518 if (DC_IS_CONEXANT(sc)) { 2519 /* 2520 * For some reason Conexant chips like 2521 * setting the CARRLOST flag even when 2522 * the carrier is there. In CURRENT we 2523 * have the same problem for Xircom 2524 * cards ! 2525 */ 2526 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2527 sc->dc_pmode == DC_PMODE_MII && 2528 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2529 DC_TXSTAT_NOCARRIER))) 2530 txstat &= ~DC_TXSTAT_ERRSUM; 2531 } else { 2532 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2533 sc->dc_pmode == DC_PMODE_MII && 2534 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2535 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2536 txstat &= ~DC_TXSTAT_ERRSUM; 2537 } 2538 2539 if (txstat & DC_TXSTAT_ERRSUM) { 2540 ifp->if_oerrors++; 2541 if (txstat & DC_TXSTAT_EXCESSCOLL) 2542 ifp->if_collisions++; 2543 if (txstat & DC_TXSTAT_LATECOLL) 2544 ifp->if_collisions++; 2545 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2546 dc_init(sc); 2547 return; 2548 } 2549 } 2550 2551 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2552 2553 ifp->if_opackets++; 2554 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2555 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2556 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2557 } 2558 2559 sc->dc_cdata.dc_tx_cnt--; 2560 DC_INC(idx, DC_TX_LIST_CNT); 2561 } 2562 2563 if (idx != sc->dc_cdata.dc_tx_cons) { 2564 /* some buffers have been freed */ 2565 sc->dc_cdata.dc_tx_cons = idx; 2566 ifp->if_flags &= ~IFF_OACTIVE; 2567 } 2568 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2569 2570 return; 2571 } 2572 2573 static void 2574 dc_tick(void *xsc) 2575 { 2576 struct dc_softc *sc = xsc; 2577 struct ifnet *ifp = &sc->arpcom.ac_if; 2578 struct mii_data *mii; 2579 u_int32_t r; 2580 2581 crit_enter(); 2582 2583 mii = device_get_softc(sc->dc_miibus); 2584 2585 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2586 if (sc->dc_flags & DC_21143_NWAY) { 2587 r = CSR_READ_4(sc, DC_10BTSTAT); 2588 if (IFM_SUBTYPE(mii->mii_media_active) == 2589 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2590 sc->dc_link = 0; 2591 mii_mediachg(mii); 2592 } 2593 if (IFM_SUBTYPE(mii->mii_media_active) == 2594 IFM_10_T && (r & DC_TSTAT_LS10)) { 2595 sc->dc_link = 0; 2596 mii_mediachg(mii); 2597 } 2598 if (sc->dc_link == 0) 2599 mii_tick(mii); 2600 } else { 2601 r = CSR_READ_4(sc, DC_ISR); 2602 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2603 sc->dc_cdata.dc_tx_cnt == 0) { 2604 mii_tick(mii); 2605 if (!(mii->mii_media_status & IFM_ACTIVE)) 2606 sc->dc_link = 0; 2607 } 2608 } 2609 } else 2610 mii_tick(mii); 2611 2612 /* 2613 * When the init routine completes, we expect to be able to send 2614 * packets right away, and in fact the network code will send a 2615 * gratuitous ARP the moment the init routine marks the interface 2616 * as running. However, even though the MAC may have been initialized, 2617 * there may be a delay of a few seconds before the PHY completes 2618 * autonegotiation and the link is brought up. Any transmissions 2619 * made during that delay will be lost. Dealing with this is tricky: 2620 * we can't just pause in the init routine while waiting for the 2621 * PHY to come ready since that would bring the whole system to 2622 * a screeching halt for several seconds. 2623 * 2624 * What we do here is prevent the TX start routine from sending 2625 * any packets until a link has been established. After the 2626 * interface has been initialized, the tick routine will poll 2627 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2628 * that time, packets will stay in the send queue, and once the 2629 * link comes up, they will be flushed out to the wire. 2630 */ 2631 if (!sc->dc_link) { 2632 mii_pollstat(mii); 2633 if (mii->mii_media_status & IFM_ACTIVE && 2634 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2635 sc->dc_link++; 2636 if (!ifq_is_empty(&ifp->if_snd)) 2637 dc_start(ifp); 2638 } 2639 } 2640 2641 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2642 callout_reset(&sc->dc_stat_timer, hz / 10, dc_tick, sc); 2643 else 2644 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 2645 2646 crit_exit(); 2647 } 2648 2649 /* 2650 * A transmit underrun has occurred. Back off the transmit threshold, 2651 * or switch to store and forward mode if we have to. 2652 */ 2653 static void 2654 dc_tx_underrun(struct dc_softc *sc) 2655 { 2656 u_int32_t isr; 2657 int i; 2658 2659 if (DC_IS_DAVICOM(sc)) 2660 dc_init(sc); 2661 2662 if (DC_IS_INTEL(sc)) { 2663 /* 2664 * The real 21143 requires that the transmitter be idle 2665 * in order to change the transmit threshold or store 2666 * and forward state. 
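 *
 * The sequence below is: clear DC_NETCFG_TX_ON, poll the ISR for
 * DC_ISR_TX_IDLE (giving up and reinitializing after DC_TIMEOUT tries),
 * bump the threshold by DC_TXTHRESH_INC, and fall back to store and
 * forward mode once DC_TXTHRESH_MAX has been exceeded.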
2667 */ 2668 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2669 2670 for (i = 0; i < DC_TIMEOUT; i++) { 2671 isr = CSR_READ_4(sc, DC_ISR); 2672 if (isr & DC_ISR_TX_IDLE) 2673 break; 2674 DELAY(10); 2675 } 2676 if (i == DC_TIMEOUT) { 2677 if_printf(&sc->arpcom.ac_if, 2678 "failed to force tx to idle state\n"); 2679 dc_init(sc); 2680 } 2681 } 2682 2683 if_printf(&sc->arpcom.ac_if, "TX underrun -- "); 2684 sc->dc_txthresh += DC_TXTHRESH_INC; 2685 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2686 printf("using store and forward mode\n"); 2687 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2688 } else { 2689 printf("increasing TX threshold\n"); 2690 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2691 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2692 } 2693 2694 if (DC_IS_INTEL(sc)) 2695 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2696 2697 return; 2698 } 2699 2700 #ifdef DEVICE_POLLING 2701 2702 static void 2703 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2704 { 2705 struct dc_softc *sc = ifp->if_softc; 2706 u_int32_t status; 2707 2708 switch(cmd) { 2709 case POLL_REGISTER: 2710 /* Disable interrupts */ 2711 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2712 break; 2713 case POLL_DEREGISTER: 2714 /* Re-enable interrupts. */ 2715 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2716 break; 2717 case POLL_ONLY: 2718 sc->rxcycles = count; 2719 dc_rxeof(sc); 2720 dc_txeof(sc); 2721 if ((ifp->if_flags & IFF_OACTIVE) == 0 && !ifq_is_empty(&ifp->if_snd)) 2722 dc_start(ifp); 2723 break; 2724 case POLL_AND_CHECK_STATUS: 2725 sc->rxcycles = count; 2726 dc_rxeof(sc); 2727 dc_txeof(sc); 2728 if ((ifp->if_flags & IFF_OACTIVE) == 0 && !ifq_is_empty(&ifp->if_snd)) 2729 dc_start(ifp); 2730 status = CSR_READ_4(sc, DC_ISR); 2731 status &= (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF| 2732 DC_ISR_TX_NOBUF|DC_ISR_TX_IDLE|DC_ISR_TX_UNDERRUN| 2733 DC_ISR_BUS_ERR); 2734 if (!status) 2735 break; 2736 /* ack what we have */ 2737 CSR_WRITE_4(sc, DC_ISR, status); 2738 2739 if (status & (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF) ) { 2740 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 2741 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 2742 2743 if (dc_rx_resync(sc)) 2744 dc_rxeof(sc); 2745 } 2746 /* restart transmit unit if necessary */ 2747 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 2748 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2749 2750 if (status & DC_ISR_TX_UNDERRUN) 2751 dc_tx_underrun(sc); 2752 2753 if (status & DC_ISR_BUS_ERR) { 2754 if_printf(ifp, "dc_poll: bus error\n"); 2755 dc_reset(sc); 2756 dc_init(sc); 2757 } 2758 break; 2759 } 2760 } 2761 #endif /* DEVICE_POLLING */ 2762 2763 static void 2764 dc_intr(void *arg) 2765 { 2766 struct dc_softc *sc; 2767 struct ifnet *ifp; 2768 u_int32_t status; 2769 2770 sc = arg; 2771 2772 if (sc->suspended) { 2773 return; 2774 } 2775 2776 ifp = &sc->arpcom.ac_if; 2777 2778 if ( (CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 2779 return ; 2780 2781 /* Suppress unwanted interrupts */ 2782 if (!(ifp->if_flags & IFF_UP)) { 2783 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 2784 dc_stop(sc); 2785 return; 2786 } 2787 2788 /* Disable interrupts. 
*/ 2789 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2790 2791 while((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) { 2792 2793 CSR_WRITE_4(sc, DC_ISR, status); 2794 2795 if (status & DC_ISR_RX_OK) { 2796 int curpkts; 2797 curpkts = ifp->if_ipackets; 2798 dc_rxeof(sc); 2799 if (curpkts == ifp->if_ipackets) { 2800 while(dc_rx_resync(sc)) 2801 dc_rxeof(sc); 2802 } 2803 } 2804 2805 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2806 dc_txeof(sc); 2807 2808 if (status & DC_ISR_TX_IDLE) { 2809 dc_txeof(sc); 2810 if (sc->dc_cdata.dc_tx_cnt) { 2811 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2812 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2813 } 2814 } 2815 2816 if (status & DC_ISR_TX_UNDERRUN) 2817 dc_tx_underrun(sc); 2818 2819 if ((status & DC_ISR_RX_WATDOGTIMEO) 2820 || (status & DC_ISR_RX_NOBUF)) { 2821 int curpkts; 2822 curpkts = ifp->if_ipackets; 2823 dc_rxeof(sc); 2824 if (curpkts == ifp->if_ipackets) { 2825 while(dc_rx_resync(sc)) 2826 dc_rxeof(sc); 2827 } 2828 } 2829 2830 if (status & DC_ISR_BUS_ERR) { 2831 dc_reset(sc); 2832 dc_init(sc); 2833 } 2834 } 2835 2836 /* Re-enable interrupts. */ 2837 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2838 2839 if (!ifq_is_empty(&ifp->if_snd)) 2840 dc_start(ifp); 2841 2842 return; 2843 } 2844 2845 /* 2846 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2847 * pointers to the fragment pointers. 2848 */ 2849 static int 2850 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 2851 { 2852 struct dc_desc *f = NULL; 2853 struct mbuf *m; 2854 int frag, cur, cnt = 0; 2855 2856 /* 2857 * Start packing the mbufs in this chain into 2858 * the fragment pointers. Stop when we run out 2859 * of fragments or hit the end of the mbuf chain. 2860 */ 2861 m = m_head; 2862 cur = frag = *txidx; 2863 2864 for (m = m_head; m != NULL; m = m->m_next) { 2865 if (m->m_len != 0) { 2866 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 2867 if (*txidx != sc->dc_cdata.dc_tx_prod && 2868 frag == (DC_TX_LIST_CNT - 1)) 2869 return(ENOBUFS); 2870 } 2871 if ((DC_TX_LIST_CNT - 2872 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) 2873 return(ENOBUFS); 2874 2875 f = &sc->dc_ldata->dc_tx_list[frag]; 2876 f->dc_ctl = DC_TXCTL_TLINK | m->m_len; 2877 if (cnt == 0) { 2878 f->dc_status = 0; 2879 f->dc_ctl |= DC_TXCTL_FIRSTFRAG; 2880 } else 2881 f->dc_status = DC_TXSTAT_OWN; 2882 f->dc_data = vtophys(mtod(m, vm_offset_t)); 2883 cur = frag; 2884 DC_INC(frag, DC_TX_LIST_CNT); 2885 cnt++; 2886 } 2887 } 2888 2889 if (m != NULL) 2890 return(ENOBUFS); 2891 2892 sc->dc_cdata.dc_tx_cnt += cnt; 2893 sc->dc_cdata.dc_tx_chain[cur] = m_head; 2894 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; 2895 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 2896 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; 2897 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 2898 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 2899 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 2900 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 2901 sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; 2902 *txidx = frag; 2903 2904 return(0); 2905 } 2906 2907 /* 2908 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2909 * to the mbuf data regions directly in the transmit lists. We also save a 2910 * copy of the pointers since the transmit list fragment pointers are 2911 * physical addresses. 
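 *
 * Note that we only peek at the head of the send queue with ifq_poll()
 * and do not dequeue an mbuf until dc_encap() has managed to claim
 * descriptors for it; chips that want coalesced or aligned buffers get
 * the chain defragmented first. If the ring fills up we set IFF_OACTIVE
 * and let dc_txeof() clear it once descriptors have been reclaimed.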
2912 */ 2913 2914 static void 2915 dc_start(struct ifnet *ifp) 2916 { 2917 struct dc_softc *sc; 2918 struct mbuf *m_head = NULL, *m_new; 2919 int did_defrag, idx; 2920 2921 sc = ifp->if_softc; 2922 2923 if (!sc->dc_link) 2924 return; 2925 2926 if (ifp->if_flags & IFF_OACTIVE) 2927 return; 2928 2929 idx = sc->dc_cdata.dc_tx_prod; 2930 2931 while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { 2932 did_defrag = 0; 2933 m_head = ifq_poll(&ifp->if_snd); 2934 if (m_head == NULL) 2935 break; 2936 2937 if (sc->dc_flags & DC_TX_COALESCE && 2938 (m_head->m_next != NULL || 2939 sc->dc_flags & DC_TX_ALIGN)){ 2940 /* 2941 * Check first if coalescing allows us to queue 2942 * the packet. We don't want to lose it if 2943 * the TX queue is full. 2944 */ 2945 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 2946 idx != sc->dc_cdata.dc_tx_prod && 2947 idx == (DC_TX_LIST_CNT - 1)) { 2948 ifp->if_flags |= IFF_OACTIVE; 2949 break; 2950 } 2951 if ((DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt) < 5) { 2952 ifp->if_flags |= IFF_OACTIVE; 2953 break; 2954 } 2955 2956 /* only coalesce if we have >1 mbufs */ 2957 m_new = m_defrag_nofree(m_head, MB_DONTWAIT); 2958 if (m_new == NULL) { 2959 ifp->if_flags |= IFF_OACTIVE; 2960 break; 2961 } 2962 m_freem(m_head); 2963 m_head = m_new; 2964 did_defrag = 1; 2965 } 2966 2967 if (dc_encap(sc, m_head, &idx)) { 2968 if (did_defrag) { 2969 m_freem(m_head); 2970 m_new = ifq_dequeue(&ifp->if_snd); 2971 m_freem(m_new); 2972 } 2973 ifp->if_flags |= IFF_OACTIVE; 2974 break; 2975 } 2976 2977 m_new = ifq_dequeue(&ifp->if_snd); 2978 if (did_defrag) 2979 m_freem(m_new); 2980 2981 /* 2982 * If there's a BPF listener, bounce a copy of this frame 2983 * to him. 2984 */ 2985 BPF_MTAP(ifp, m_head); 2986 2987 if (sc->dc_flags & DC_TX_ONE) { 2988 ifp->if_flags |= IFF_OACTIVE; 2989 break; 2990 } 2991 } 2992 2993 /* Transmit */ 2994 sc->dc_cdata.dc_tx_prod = idx; 2995 if (!(sc->dc_flags & DC_TX_POLL)) 2996 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2997 2998 /* 2999 * Set a timeout in case the chip goes out to lunch. 3000 */ 3001 ifp->if_timer = 5; 3002 3003 return; 3004 } 3005 3006 static void 3007 dc_init(void *xsc) 3008 { 3009 struct dc_softc *sc = xsc; 3010 struct ifnet *ifp = &sc->arpcom.ac_if; 3011 struct mii_data *mii; 3012 3013 crit_enter(); 3014 3015 mii = device_get_softc(sc->dc_miibus); 3016 3017 /* 3018 * Cancel pending I/O and free all RX/TX buffers. 3019 */ 3020 dc_stop(sc); 3021 dc_reset(sc); 3022 3023 /* 3024 * Set cache alignment and burst length. 3025 */ 3026 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3027 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3028 else 3029 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 3030 /* 3031 * Evenly share the bus between receive and transmit process.
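 *
 * The arbitration bit is only set on the Intel parts. Davicom and Intel
 * chips also get the DC_BURSTLEN_USECA setting while everything else is
 * limited to bursts of 16 longwords, and the PCI cache line size saved
 * at attach time selects the cache alignment value programmed below.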
3032 */ 3033 if (DC_IS_INTEL(sc)) 3034 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3035 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3036 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3037 } else { 3038 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3039 } 3040 if (sc->dc_flags & DC_TX_POLL) 3041 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3042 switch(sc->dc_cachesize) { 3043 case 32: 3044 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3045 break; 3046 case 16: 3047 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3048 break; 3049 case 8: 3050 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3051 break; 3052 case 0: 3053 default: 3054 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3055 break; 3056 } 3057 3058 if (sc->dc_flags & DC_TX_STORENFWD) 3059 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3060 else { 3061 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3062 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3063 } else { 3064 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3065 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3066 } 3067 } 3068 3069 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3070 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3071 3072 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3073 /* 3074 * The app notes for the 98713 and 98715A say that 3075 * in order to have the chips operate properly, a magic 3076 * number must be written to CSR16. Macronix does not 3077 * document the meaning of these bits so there's no way 3078 * to know exactly what they do. The 98713 has a magic 3079 * number all its own; the rest all use a different one. 3080 */ 3081 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3082 if (sc->dc_type == DC_TYPE_98713) 3083 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3084 else 3085 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3086 } 3087 3088 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3089 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3090 3091 /* Init circular RX list. */ 3092 if (dc_list_rx_init(sc) == ENOBUFS) { 3093 if_printf(ifp, "initialization failed: no " 3094 "memory for rx buffers\n"); 3095 dc_stop(sc); 3096 crit_exit(); 3097 return; 3098 } 3099 3100 /* 3101 * Init tx descriptors. 3102 */ 3103 dc_list_tx_init(sc); 3104 3105 /* 3106 * Load the address of the RX list. 3107 */ 3108 CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); 3109 CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); 3110 3111 /* 3112 * Enable interrupts. 3113 */ 3114 #ifdef DEVICE_POLLING 3115 /* 3116 * ... but only if we are not polling, and make sure they are off in 3117 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3118 * after a reset. 3119 */ 3120 if (ifp->if_flags & IFF_POLLING) 3121 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3122 else 3123 #endif 3124 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3125 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3126 3127 /* Enable transmitter. */ 3128 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3129 3130 /* 3131 * If this is an Intel 21143 and we're not using the 3132 * MII port, program the LED control pins so we get 3133 * link and activity indications. 3134 */ 3135 if (sc->dc_flags & DC_TULIP_LEDS) { 3136 CSR_WRITE_4(sc, DC_WATCHDOG, 3137 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 3138 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3139 } 3140 3141 /* 3142 * Load the RX/multicast filter. We do this sort of late 3143 * because the filter programming scheme on the 21143 and 3144 * some clones requires DMAing a setup frame via the TX 3145 * engine, and we need the transmitter enabled for that. 
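 *
 * The setup frame itself is queued on the TX ring as a descriptor
 * flagged with DC_TXCTL_SETUP, which is also why dc_txeof() and
 * dc_stop() treat such descriptors specially.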
3146 */ 3147 dc_setfilt(sc); 3148 3149 /* Enable receiver. */ 3150 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3151 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3152 3153 mii_mediachg(mii); 3154 dc_setcfg(sc, sc->dc_if_media); 3155 3156 ifp->if_flags |= IFF_RUNNING; 3157 ifp->if_flags &= ~IFF_OACTIVE; 3158 3159 crit_exit(); 3160 3161 /* Don't start the ticker if this is a homePNA link. */ 3162 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3163 sc->dc_link = 1; 3164 else { 3165 if (sc->dc_flags & DC_21143_NWAY) 3166 callout_reset(&sc->dc_stat_timer, hz/10, dc_tick, sc); 3167 else 3168 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 3169 } 3170 3171 return; 3172 } 3173 3174 /* 3175 * Set media options. 3176 */ 3177 static int 3178 dc_ifmedia_upd(struct ifnet *ifp) 3179 { 3180 struct dc_softc *sc; 3181 struct mii_data *mii; 3182 struct ifmedia *ifm; 3183 3184 sc = ifp->if_softc; 3185 mii = device_get_softc(sc->dc_miibus); 3186 mii_mediachg(mii); 3187 ifm = &mii->mii_media; 3188 3189 if (DC_IS_DAVICOM(sc) && 3190 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3191 dc_setcfg(sc, ifm->ifm_media); 3192 else 3193 sc->dc_link = 0; 3194 3195 return(0); 3196 } 3197 3198 /* 3199 * Report current media status. 3200 */ 3201 static void 3202 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3203 { 3204 struct dc_softc *sc; 3205 struct mii_data *mii; 3206 struct ifmedia *ifm; 3207 3208 sc = ifp->if_softc; 3209 mii = device_get_softc(sc->dc_miibus); 3210 mii_pollstat(mii); 3211 ifm = &mii->mii_media; 3212 if (DC_IS_DAVICOM(sc)) { 3213 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3214 ifmr->ifm_active = ifm->ifm_media; 3215 ifmr->ifm_status = 0; 3216 return; 3217 } 3218 } 3219 ifmr->ifm_active = mii->mii_media_active; 3220 ifmr->ifm_status = mii->mii_media_status; 3221 3222 return; 3223 } 3224 3225 static int 3226 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3227 { 3228 struct dc_softc *sc = ifp->if_softc; 3229 struct ifreq *ifr = (struct ifreq *) data; 3230 struct mii_data *mii; 3231 int error = 0; 3232 3233 crit_enter(); 3234 3235 switch(command) { 3236 case SIOCSIFFLAGS: 3237 if (ifp->if_flags & IFF_UP) { 3238 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3239 (IFF_PROMISC | IFF_ALLMULTI); 3240 if (ifp->if_flags & IFF_RUNNING) { 3241 if (need_setfilt) 3242 dc_setfilt(sc); 3243 } else { 3244 sc->dc_txthresh = 0; 3245 dc_init(sc); 3246 } 3247 } else { 3248 if (ifp->if_flags & IFF_RUNNING) 3249 dc_stop(sc); 3250 } 3251 sc->dc_if_flags = ifp->if_flags; 3252 error = 0; 3253 break; 3254 case SIOCADDMULTI: 3255 case SIOCDELMULTI: 3256 dc_setfilt(sc); 3257 error = 0; 3258 break; 3259 case SIOCGIFMEDIA: 3260 case SIOCSIFMEDIA: 3261 mii = device_get_softc(sc->dc_miibus); 3262 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3263 break; 3264 default: 3265 error = ether_ioctl(ifp, command, data); 3266 break; 3267 } 3268 3269 crit_exit(); 3270 3271 return(error); 3272 } 3273 3274 static void 3275 dc_watchdog(struct ifnet *ifp) 3276 { 3277 struct dc_softc *sc; 3278 3279 sc = ifp->if_softc; 3280 3281 ifp->if_oerrors++; 3282 if_printf(ifp, "watchdog timeout\n"); 3283 3284 dc_stop(sc); 3285 dc_reset(sc); 3286 dc_init(sc); 3287 3288 if (!ifq_is_empty(&ifp->if_snd)) 3289 dc_start(ifp); 3290 3291 return; 3292 } 3293 3294 /* 3295 * Stop the adapter and free any mbufs allocated to the 3296 * RX and TX lists. 
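 *
 * TX descriptors that describe a setup frame or an intermediate
 * fragment have no mbuf attached, so they are simply cleared; both
 * descriptor rings are zeroed out afterwards.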
3297 */ 3298 static void 3299 dc_stop(struct dc_softc *sc) 3300 { 3301 int i; 3302 struct ifnet *ifp; 3303 3304 ifp = &sc->arpcom.ac_if; 3305 ifp->if_timer = 0; 3306 3307 callout_stop(&sc->dc_stat_timer); 3308 3309 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3310 3311 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3312 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3313 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3314 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3315 sc->dc_link = 0; 3316 3317 /* 3318 * Free data in the RX lists. 3319 */ 3320 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3321 if (sc->dc_cdata.dc_rx_chain[i] != NULL) { 3322 m_freem(sc->dc_cdata.dc_rx_chain[i]); 3323 sc->dc_cdata.dc_rx_chain[i] = NULL; 3324 } 3325 } 3326 bzero((char *)&sc->dc_ldata->dc_rx_list, 3327 sizeof(sc->dc_ldata->dc_rx_list)); 3328 3329 /* 3330 * Free the TX list buffers. 3331 */ 3332 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3333 if (sc->dc_cdata.dc_tx_chain[i] != NULL) { 3334 if ((sc->dc_ldata->dc_tx_list[i].dc_ctl & 3335 DC_TXCTL_SETUP) || 3336 !(sc->dc_ldata->dc_tx_list[i].dc_ctl & 3337 DC_TXCTL_LASTFRAG)) { 3338 sc->dc_cdata.dc_tx_chain[i] = NULL; 3339 continue; 3340 } 3341 m_freem(sc->dc_cdata.dc_tx_chain[i]); 3342 sc->dc_cdata.dc_tx_chain[i] = NULL; 3343 } 3344 } 3345 3346 bzero((char *)&sc->dc_ldata->dc_tx_list, 3347 sizeof(sc->dc_ldata->dc_tx_list)); 3348 3349 return; 3350 } 3351 3352 /* 3353 * Stop all chip I/O so that the kernel's probe routines don't 3354 * get confused by errant DMAs when rebooting. 3355 */ 3356 static void 3357 dc_shutdown(device_t dev) 3358 { 3359 struct dc_softc *sc; 3360 3361 sc = device_get_softc(dev); 3362 3363 dc_stop(sc); 3364 3365 return; 3366 } 3367 3368 /* 3369 * Device suspend routine. Stop the interface and save some PCI 3370 * settings in case the BIOS doesn't restore them properly on 3371 * resume. 3372 */ 3373 static int 3374 dc_suspend(device_t dev) 3375 { 3376 struct dc_softc *sc = device_get_softc(dev); 3377 int i; 3378 3379 crit_enter(); 3380 3381 dc_stop(sc); 3382 3383 for (i = 0; i < 5; i++) 3384 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3385 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3386 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3387 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3388 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3389 3390 sc->suspended = 1; 3391 3392 crit_exit(); 3393 return (0); 3394 } 3395 3396 /* 3397 * Device resume routine. Restore some PCI settings in case the BIOS 3398 * doesn't, re-enable busmastering, and restart the interface if 3399 * appropriate. 3400 */ 3401 static int 3402 dc_resume(device_t dev) 3403 { 3404 struct dc_softc *sc = device_get_softc(dev); 3405 struct ifnet *ifp = &sc->arpcom.ac_if; 3406 int i; 3407 3408 crit_enter(); 3409 3410 dc_acpi(dev); 3411 3412 /* better way to do this? */ 3413 for (i = 0; i < 5; i++) 3414 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3415 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3416 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3417 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3418 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3419 3420 /* reenable busmastering */ 3421 pci_enable_busmaster(dev); 3422 pci_enable_io(dev, DC_RES); 3423 3424 /* reinitialize interface if necessary */ 3425 if (ifp->if_flags & IFF_UP) 3426 dc_init(sc); 3427 3428 sc->suspended = 0; 3429 3430 crit_exit(); 3431 return (0); 3432 } 3433