/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_dc.c,v 1.9.2.45 2003/06/08 14:31:53 mux Exp $
 * $DragonFly: src/sys/dev/netif/dc/if_dc.c,v 1.52 2006/10/10 11:34:55 sephe Exp $
 */

/*
 * DEC "tulip" clone ethernet driver.  Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN985 (www.admtek.com.tw)
 * Netgear FA511 (www.netgear.com).  Appears to be a rebadged ADMtek AN985.
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217 (www.accton.com)
 * Xircom X3201 (www.xircom.com)
 * Conexant LANfinity (www.conexant.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Intel 21143 is the successor to the DEC 21140.  It is basically
 * the same as the 21140 but with a few new features.  The 21143 supports
 * four kinds of media attachments:
 *
 *	o MII port, for 10Mbps and 100Mbps support and NWAY
 *	  autonegotiation provided by an external PHY.
 *	o SYM port, for symbol mode 100Mbps support.
 *	o 10baseT port.
 *	o AUI/BNC port.
 *
 * The 100Mbps SYM port and 10baseT port can be used together in
 * combination with the internal NWAY support to create a 10/100
 * autosensing configuration.
 *
 * Note that not all tulip workalikes are handled in this driver: we only
 * deal with those which are relatively well behaved.  The Winbond is
 * handled separately due to its different register offsets and the
 * special handling needed for its various bugs.  The PNIC is handled
 * here, but I'm not thrilled about it.
 *
 * All of the workalike chips use some form of MII transceiver support
 * with the exception of the Macronix chips, which also have a SYM port.
 * The ASIX AX88140A is also documented to have a SYM port, but all
 * the cards I've seen use an MII transceiver, probably because the
 * AX88140A doesn't support internal NWAY.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define DC_USEIOSPACE

#include "if_dcreg.h"

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
140 */ 141 static const struct dc_type dc_devs[] = { 142 { DC_VENDORID_DEC, DC_DEVICEID_21143, 143 "Intel 21143 10/100BaseTX" }, 144 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 145 "Davicom DM9009 10/100BaseTX" }, 146 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 147 "Davicom DM9100 10/100BaseTX" }, 148 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 149 "Davicom DM9102 10/100BaseTX" }, 150 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 151 "Davicom DM9102A 10/100BaseTX" }, 152 { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 153 "ADMtek AL981 10/100BaseTX" }, 154 { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 155 "ADMtek AN985 10/100BaseTX" }, 156 { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, 157 "Netgear FA511 10/100BaseTX" }, 158 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, 159 "ADMtek ADM9511 10/100BaseTX" }, 160 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, 161 "ADMtek ADM9513 10/100BaseTX" }, 162 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 163 "ASIX AX88140A 10/100BaseTX" }, 164 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 165 "ASIX AX88141 10/100BaseTX" }, 166 { DC_VENDORID_MX, DC_DEVICEID_98713, 167 "Macronix 98713 10/100BaseTX" }, 168 { DC_VENDORID_MX, DC_DEVICEID_98713, 169 "Macronix 98713A 10/100BaseTX" }, 170 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 171 "Compex RL100-TX 10/100BaseTX" }, 172 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 173 "Compex RL100-TX 10/100BaseTX" }, 174 { DC_VENDORID_MX, DC_DEVICEID_987x5, 175 "Macronix 98715/98715A 10/100BaseTX" }, 176 { DC_VENDORID_MX, DC_DEVICEID_987x5, 177 "Macronix 98715AEC-C 10/100BaseTX" }, 178 { DC_VENDORID_MX, DC_DEVICEID_987x5, 179 "Macronix 98725 10/100BaseTX" }, 180 { DC_VENDORID_MX, DC_DEVICEID_98727, 181 "Macronix 98727/98732 10/100BaseTX" }, 182 { DC_VENDORID_LO, DC_DEVICEID_82C115, 183 "LC82C115 PNIC II 10/100BaseTX" }, 184 { DC_VENDORID_LO, DC_DEVICEID_82C168, 185 "82c168 PNIC 10/100BaseTX" }, 186 { DC_VENDORID_LO, DC_DEVICEID_82C168, 187 "82c169 PNIC 10/100BaseTX" }, 188 { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 189 "Accton EN1217 10/100BaseTX" }, 190 { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 191 "Accton EN2242 MiniPCI 10/100BaseTX" }, 192 { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 193 "Xircom X3201 10/100BaseTX" }, 194 { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 195 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 196 { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, 197 "3Com OfficeConnect 10/100B" }, 198 { 0, 0, NULL } 199 }; 200 201 static int dc_probe (device_t); 202 static int dc_attach (device_t); 203 static int dc_detach (device_t); 204 static int dc_suspend (device_t); 205 static int dc_resume (device_t); 206 static void dc_acpi (device_t); 207 static const struct dc_type *dc_devtype (device_t); 208 static int dc_newbuf (struct dc_softc *, int, struct mbuf *); 209 static int dc_encap (struct dc_softc *, struct mbuf *, 210 u_int32_t *); 211 static void dc_pnic_rx_bug_war (struct dc_softc *, int); 212 static int dc_rx_resync (struct dc_softc *); 213 static void dc_rxeof (struct dc_softc *); 214 static void dc_txeof (struct dc_softc *); 215 static void dc_tick (void *); 216 static void dc_tx_underrun (struct dc_softc *); 217 static void dc_intr (void *); 218 static void dc_start (struct ifnet *); 219 static int dc_ioctl (struct ifnet *, u_long, caddr_t, 220 struct ucred *); 221 #ifdef DEVICE_POLLING 222 static void dc_poll (struct ifnet *ifp, enum poll_cmd cmd, 223 int count); 224 #endif 225 static void dc_init (void *); 226 static void dc_stop (struct dc_softc *); 227 static void dc_watchdog (struct ifnet *); 228 static void dc_shutdown (device_t); 229 static int 
dc_ifmedia_upd (struct ifnet *); 230 static void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *); 231 232 static void dc_delay (struct dc_softc *); 233 static void dc_eeprom_idle (struct dc_softc *); 234 static void dc_eeprom_putbyte (struct dc_softc *, int); 235 static void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 236 static void dc_eeprom_getword_pnic 237 (struct dc_softc *, int, u_int16_t *); 238 static void dc_eeprom_getword_xircom 239 (struct dc_softc *, int, u_int16_t *); 240 static void dc_eeprom_width (struct dc_softc *); 241 static void dc_read_eeprom (struct dc_softc *, caddr_t, int, 242 int, int); 243 244 static void dc_mii_writebit (struct dc_softc *, int); 245 static int dc_mii_readbit (struct dc_softc *); 246 static void dc_mii_sync (struct dc_softc *); 247 static void dc_mii_send (struct dc_softc *, u_int32_t, int); 248 static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 249 static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 250 static int dc_miibus_readreg (device_t, int, int); 251 static int dc_miibus_writereg (device_t, int, int, int); 252 static void dc_miibus_statchg (device_t); 253 static void dc_miibus_mediainit (device_t); 254 255 static u_int32_t dc_crc_mask (struct dc_softc *); 256 static void dc_setcfg (struct dc_softc *, int); 257 static void dc_setfilt_21143 (struct dc_softc *); 258 static void dc_setfilt_asix (struct dc_softc *); 259 static void dc_setfilt_admtek (struct dc_softc *); 260 static void dc_setfilt_xircom (struct dc_softc *); 261 262 static void dc_setfilt (struct dc_softc *); 263 264 static void dc_reset (struct dc_softc *); 265 static int dc_list_rx_init (struct dc_softc *); 266 static int dc_list_tx_init (struct dc_softc *); 267 268 static void dc_read_srom (struct dc_softc *, int); 269 static void dc_parse_21143_srom (struct dc_softc *); 270 static void dc_decode_leaf_sia (struct dc_softc *, 271 struct dc_eblock_sia *); 272 static void dc_decode_leaf_mii (struct dc_softc *, 273 struct dc_eblock_mii *); 274 static void dc_decode_leaf_sym (struct dc_softc *, 275 struct dc_eblock_sym *); 276 static void dc_apply_fixup (struct dc_softc *, int); 277 static uint32_t dc_mchash_xircom(struct dc_softc *, const uint8_t *); 278 279 #ifdef DC_USEIOSPACE 280 #define DC_RES SYS_RES_IOPORT 281 #define DC_RID DC_PCI_CFBIO 282 #else 283 #define DC_RES SYS_RES_MEMORY 284 #define DC_RID DC_PCI_CFBMA 285 #endif 286 287 static device_method_t dc_methods[] = { 288 /* Device interface */ 289 DEVMETHOD(device_probe, dc_probe), 290 DEVMETHOD(device_attach, dc_attach), 291 DEVMETHOD(device_detach, dc_detach), 292 DEVMETHOD(device_suspend, dc_suspend), 293 DEVMETHOD(device_resume, dc_resume), 294 DEVMETHOD(device_shutdown, dc_shutdown), 295 296 /* bus interface */ 297 DEVMETHOD(bus_print_child, bus_generic_print_child), 298 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 299 300 /* MII interface */ 301 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 302 DEVMETHOD(miibus_writereg, dc_miibus_writereg), 303 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 304 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 305 306 { 0, 0 } 307 }; 308 309 static driver_t dc_driver = { 310 "dc", 311 dc_methods, 312 sizeof(struct dc_softc) 313 }; 314 315 static devclass_t dc_devclass; 316 317 #ifdef __i386__ 318 static int dc_quick=1; 319 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, 320 &dc_quick,0,"do not mdevget in dc driver"); 321 #endif 322 323 DECLARE_DUMMY_MODULE(if_dc); 324 DRIVER_MODULE(if_dc, cardbus, dc_driver, 
dc_devclass, 0, 0); 325 DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0); 326 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 327 328 #define DC_SETBIT(sc, reg, x) \ 329 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 330 331 #define DC_CLRBIT(sc, reg, x) \ 332 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 333 334 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 335 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 336 337 static void 338 dc_delay(struct dc_softc *sc) 339 { 340 int idx; 341 342 for (idx = (300 / 33) + 1; idx > 0; idx--) 343 CSR_READ_4(sc, DC_BUSCTL); 344 } 345 346 static void 347 dc_eeprom_width(struct dc_softc *sc) 348 { 349 int i; 350 351 /* Force EEPROM to idle state. */ 352 dc_eeprom_idle(sc); 353 354 /* Enter EEPROM access mode. */ 355 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 356 dc_delay(sc); 357 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 358 dc_delay(sc); 359 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 360 dc_delay(sc); 361 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 362 dc_delay(sc); 363 364 for (i = 3; i--;) { 365 if (6 & (1 << i)) 366 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 367 else 368 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 369 dc_delay(sc); 370 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 371 dc_delay(sc); 372 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 373 dc_delay(sc); 374 } 375 376 for (i = 1; i <= 12; i++) { 377 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 378 dc_delay(sc); 379 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 380 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 381 dc_delay(sc); 382 break; 383 } 384 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 385 dc_delay(sc); 386 } 387 388 /* Turn off EEPROM access mode. */ 389 dc_eeprom_idle(sc); 390 391 if (i < 4 || i > 12) 392 sc->dc_romwidth = 6; 393 else 394 sc->dc_romwidth = i; 395 396 /* Enter EEPROM access mode. */ 397 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 398 dc_delay(sc); 399 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 400 dc_delay(sc); 401 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 402 dc_delay(sc); 403 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 404 dc_delay(sc); 405 406 /* Turn off EEPROM access mode. */ 407 dc_eeprom_idle(sc); 408 } 409 410 static void 411 dc_eeprom_idle(struct dc_softc *sc) 412 { 413 int i; 414 415 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 416 dc_delay(sc); 417 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 418 dc_delay(sc); 419 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 420 dc_delay(sc); 421 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 422 dc_delay(sc); 423 424 for (i = 0; i < 25; i++) { 425 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 426 dc_delay(sc); 427 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 428 dc_delay(sc); 429 } 430 431 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 432 dc_delay(sc); 433 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 434 dc_delay(sc); 435 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 436 437 return; 438 } 439 440 /* 441 * Send a read command and address to the EEPROM, check for ACK. 442 */ 443 static void 444 dc_eeprom_putbyte(struct dc_softc *sc, int addr) 445 { 446 int d, i; 447 448 d = DC_EECMD_READ >> 6; 449 for (i = 3; i--; ) { 450 if (d & (1 << i)) 451 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 452 else 453 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 454 dc_delay(sc); 455 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 456 dc_delay(sc); 457 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 458 dc_delay(sc); 459 } 460 461 /* 462 * Feed in each bit and strobe the clock. 
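 * (The SROM here is a MicroWire-style serial EEPROM, typically a 93C46
 * or 93C66 part on these boards.  The 3-bit read opcode has just been
 * clocked out MSB-first above; below, sc->dc_romwidth address bits are
 * clocked out the same way, after which the part shifts 16 data bits
 * back on DC_SIO_EE_DATAOUT for dc_eeprom_getword() to sample.)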
463 */ 464 for (i = sc->dc_romwidth; i--;) { 465 if (addr & (1 << i)) { 466 SIO_SET(DC_SIO_EE_DATAIN); 467 } else { 468 SIO_CLR(DC_SIO_EE_DATAIN); 469 } 470 dc_delay(sc); 471 SIO_SET(DC_SIO_EE_CLK); 472 dc_delay(sc); 473 SIO_CLR(DC_SIO_EE_CLK); 474 dc_delay(sc); 475 } 476 477 return; 478 } 479 480 /* 481 * Read a word of data stored in the EEPROM at address 'addr.' 482 * The PNIC 82c168/82c169 has its own non-standard way to read 483 * the EEPROM. 484 */ 485 static void 486 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 487 { 488 int i; 489 u_int32_t r; 490 491 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); 492 493 for (i = 0; i < DC_TIMEOUT; i++) { 494 DELAY(1); 495 r = CSR_READ_4(sc, DC_SIO); 496 if (!(r & DC_PN_SIOCTL_BUSY)) { 497 *dest = (u_int16_t)(r & 0xFFFF); 498 return; 499 } 500 } 501 502 return; 503 } 504 505 /* 506 * Read a word of data stored in the EEPROM at address 'addr.' 507 * The Xircom X3201 has its own non-standard way to read 508 * the EEPROM, too. 509 */ 510 static void 511 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 512 { 513 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 514 515 addr *= 2; 516 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 517 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff; 518 addr += 1; 519 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 520 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff) << 8; 521 522 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 523 } 524 525 /* 526 * Read a word of data stored in the EEPROM at address 'addr.' 527 */ 528 static void 529 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 530 { 531 int i; 532 u_int16_t word = 0; 533 534 /* Force EEPROM to idle state. */ 535 dc_eeprom_idle(sc); 536 537 /* Enter EEPROM access mode. */ 538 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 539 dc_delay(sc); 540 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 541 dc_delay(sc); 542 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 543 dc_delay(sc); 544 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 545 dc_delay(sc); 546 547 /* 548 * Send address of word we want to read. 549 */ 550 dc_eeprom_putbyte(sc, addr); 551 552 /* 553 * Start reading bits from EEPROM. 554 */ 555 for (i = 0x8000; i; i >>= 1) { 556 SIO_SET(DC_SIO_EE_CLK); 557 dc_delay(sc); 558 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 559 word |= i; 560 dc_delay(sc); 561 SIO_CLR(DC_SIO_EE_CLK); 562 dc_delay(sc); 563 } 564 565 /* Turn off EEPROM access mode. */ 566 dc_eeprom_idle(sc); 567 568 *dest = word; 569 570 return; 571 } 572 573 /* 574 * Read a sequence of words from the EEPROM. 575 */ 576 static void 577 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap) 578 { 579 int i; 580 u_int16_t word = 0, *ptr; 581 582 for (i = 0; i < cnt; i++) { 583 if (DC_IS_PNIC(sc)) 584 dc_eeprom_getword_pnic(sc, off + i, &word); 585 else if (DC_IS_XIRCOM(sc)) 586 dc_eeprom_getword_xircom(sc, off + i, &word); 587 else 588 dc_eeprom_getword(sc, off + i, &word); 589 ptr = (u_int16_t *)(dest + (i * 2)); 590 if (swap) 591 *ptr = ntohs(word); 592 else 593 *ptr = word; 594 } 595 596 return; 597 } 598 599 /* 600 * The following two routines are taken from the Macronix 98713 601 * Application Notes pp.19-21. 602 */ 603 /* 604 * Write a bit to the MII bus. 
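 * (This bit-bangs the IEEE 802.3 clause 22 MDIO management interface
 * through the SIO register: the data value is presented on
 * DC_SIO_MII_DATAOUT and the PHY latches it on the rising edge of
 * DC_SIO_MII_CLK, which the code raises and then lowers once per bit.)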
605 */ 606 static void 607 dc_mii_writebit(struct dc_softc *sc, int bit) 608 { 609 if (bit) 610 CSR_WRITE_4(sc, DC_SIO, 611 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT); 612 else 613 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 614 615 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 616 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 617 618 return; 619 } 620 621 /* 622 * Read a bit from the MII bus. 623 */ 624 static int 625 dc_mii_readbit(struct dc_softc *sc) 626 { 627 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR); 628 CSR_READ_4(sc, DC_SIO); 629 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 630 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 631 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 632 return(1); 633 634 return(0); 635 } 636 637 /* 638 * Sync the PHYs by setting data bit and strobing the clock 32 times. 639 */ 640 static void 641 dc_mii_sync(struct dc_softc *sc) 642 { 643 int i; 644 645 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 646 647 for (i = 0; i < 32; i++) 648 dc_mii_writebit(sc, 1); 649 650 return; 651 } 652 653 /* 654 * Clock a series of bits through the MII. 655 */ 656 static void 657 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 658 { 659 int i; 660 661 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 662 dc_mii_writebit(sc, bits & i); 663 } 664 665 /* 666 * Read an PHY register through the MII. 667 */ 668 static int 669 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 670 { 671 int ack, i; 672 673 /* 674 * Set up frame for RX. 675 */ 676 frame->mii_stdelim = DC_MII_STARTDELIM; 677 frame->mii_opcode = DC_MII_READOP; 678 frame->mii_turnaround = 0; 679 frame->mii_data = 0; 680 681 /* 682 * Sync the PHYs. 683 */ 684 dc_mii_sync(sc); 685 686 /* 687 * Send command/address info. 688 */ 689 dc_mii_send(sc, frame->mii_stdelim, 2); 690 dc_mii_send(sc, frame->mii_opcode, 2); 691 dc_mii_send(sc, frame->mii_phyaddr, 5); 692 dc_mii_send(sc, frame->mii_regaddr, 5); 693 694 #ifdef notdef 695 /* Idle bit */ 696 dc_mii_writebit(sc, 1); 697 dc_mii_writebit(sc, 0); 698 #endif 699 700 /* Check for ack */ 701 ack = dc_mii_readbit(sc); 702 703 /* 704 * Now try reading data bits. If the ack failed, we still 705 * need to clock through 16 cycles to keep the PHY(s) in sync. 706 */ 707 if (ack) { 708 for(i = 0; i < 16; i++) { 709 dc_mii_readbit(sc); 710 } 711 goto fail; 712 } 713 714 for (i = 0x8000; i; i >>= 1) { 715 if (!ack) { 716 if (dc_mii_readbit(sc)) 717 frame->mii_data |= i; 718 } 719 } 720 721 fail: 722 723 dc_mii_writebit(sc, 0); 724 dc_mii_writebit(sc, 0); 725 726 if (ack) 727 return(1); 728 return(0); 729 } 730 731 /* 732 * Write to a PHY register through the MII. 733 */ 734 static int 735 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 736 { 737 /* 738 * Set up frame for TX. 739 */ 740 741 frame->mii_stdelim = DC_MII_STARTDELIM; 742 frame->mii_opcode = DC_MII_WRITEOP; 743 frame->mii_turnaround = DC_MII_TURNAROUND; 744 745 /* 746 * Sync the PHYs. 747 */ 748 dc_mii_sync(sc); 749 750 dc_mii_send(sc, frame->mii_stdelim, 2); 751 dc_mii_send(sc, frame->mii_opcode, 2); 752 dc_mii_send(sc, frame->mii_phyaddr, 5); 753 dc_mii_send(sc, frame->mii_regaddr, 5); 754 dc_mii_send(sc, frame->mii_turnaround, 2); 755 dc_mii_send(sc, frame->mii_data, 16); 756 757 /* Idle bit. 
*/ 758 dc_mii_writebit(sc, 0); 759 dc_mii_writebit(sc, 0); 760 761 return(0); 762 } 763 764 static int 765 dc_miibus_readreg(device_t dev, int phy, int reg) 766 { 767 struct dc_mii_frame frame; 768 struct dc_softc *sc; 769 int i, rval, phy_reg = 0; 770 771 sc = device_get_softc(dev); 772 bzero((char *)&frame, sizeof(frame)); 773 774 /* 775 * Note: both the AL981 and AN985 have internal PHYs, 776 * however the AL981 provides direct access to the PHY 777 * registers while the AN985 uses a serial MII interface. 778 * The AN985's MII interface is also buggy in that you 779 * can read from any MII address (0 to 31), but only address 1 780 * behaves normally. To deal with both cases, we pretend 781 * that the PHY is at MII address 1. 782 */ 783 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 784 return(0); 785 786 /* 787 * Note: the ukphy probes of the RS7112 report a PHY at 788 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 789 * so we only respond to correct one. 790 */ 791 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 792 return(0); 793 794 if (sc->dc_pmode != DC_PMODE_MII) { 795 if (phy == (MII_NPHY - 1)) { 796 switch(reg) { 797 case MII_BMSR: 798 /* 799 * Fake something to make the probe 800 * code think there's a PHY here. 801 */ 802 return(BMSR_MEDIAMASK); 803 break; 804 case MII_PHYIDR1: 805 if (DC_IS_PNIC(sc)) 806 return(DC_VENDORID_LO); 807 return(DC_VENDORID_DEC); 808 break; 809 case MII_PHYIDR2: 810 if (DC_IS_PNIC(sc)) 811 return(DC_DEVICEID_82C168); 812 return(DC_DEVICEID_21143); 813 break; 814 default: 815 return(0); 816 break; 817 } 818 } else 819 return(0); 820 } 821 822 if (DC_IS_PNIC(sc)) { 823 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 824 (phy << 23) | (reg << 18)); 825 for (i = 0; i < DC_TIMEOUT; i++) { 826 DELAY(1); 827 rval = CSR_READ_4(sc, DC_PN_MII); 828 if (!(rval & DC_PN_MII_BUSY)) { 829 rval &= 0xFFFF; 830 return(rval == 0xFFFF ? 
0 : rval); 831 } 832 } 833 return(0); 834 } 835 836 if (DC_IS_COMET(sc)) { 837 switch(reg) { 838 case MII_BMCR: 839 phy_reg = DC_AL_BMCR; 840 break; 841 case MII_BMSR: 842 phy_reg = DC_AL_BMSR; 843 break; 844 case MII_PHYIDR1: 845 phy_reg = DC_AL_VENID; 846 break; 847 case MII_PHYIDR2: 848 phy_reg = DC_AL_DEVID; 849 break; 850 case MII_ANAR: 851 phy_reg = DC_AL_ANAR; 852 break; 853 case MII_ANLPAR: 854 phy_reg = DC_AL_LPAR; 855 break; 856 case MII_ANER: 857 phy_reg = DC_AL_ANER; 858 break; 859 default: 860 if_printf(&sc->arpcom.ac_if, 861 "phy_read: bad phy register %x\n", reg); 862 return(0); 863 break; 864 } 865 866 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 867 868 if (rval == 0xFFFF) 869 return(0); 870 return(rval); 871 } 872 873 frame.mii_phyaddr = phy; 874 frame.mii_regaddr = reg; 875 if (sc->dc_type == DC_TYPE_98713) { 876 phy_reg = CSR_READ_4(sc, DC_NETCFG); 877 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 878 } 879 dc_mii_readreg(sc, &frame); 880 if (sc->dc_type == DC_TYPE_98713) 881 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 882 883 return(frame.mii_data); 884 } 885 886 static int 887 dc_miibus_writereg(device_t dev, int phy, int reg, int data) 888 { 889 struct dc_softc *sc; 890 struct dc_mii_frame frame; 891 int i, phy_reg = 0; 892 893 sc = device_get_softc(dev); 894 bzero((char *)&frame, sizeof(frame)); 895 896 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 897 return(0); 898 899 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 900 return(0); 901 902 if (DC_IS_PNIC(sc)) { 903 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 904 (phy << 23) | (reg << 10) | data); 905 for (i = 0; i < DC_TIMEOUT; i++) { 906 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 907 break; 908 } 909 return(0); 910 } 911 912 if (DC_IS_COMET(sc)) { 913 switch(reg) { 914 case MII_BMCR: 915 phy_reg = DC_AL_BMCR; 916 break; 917 case MII_BMSR: 918 phy_reg = DC_AL_BMSR; 919 break; 920 case MII_PHYIDR1: 921 phy_reg = DC_AL_VENID; 922 break; 923 case MII_PHYIDR2: 924 phy_reg = DC_AL_DEVID; 925 break; 926 case MII_ANAR: 927 phy_reg = DC_AL_ANAR; 928 break; 929 case MII_ANLPAR: 930 phy_reg = DC_AL_LPAR; 931 break; 932 case MII_ANER: 933 phy_reg = DC_AL_ANER; 934 break; 935 default: 936 if_printf(&sc->arpcom.ac_if, 937 "phy_write: bad phy register %x\n", reg); 938 return(0); 939 break; 940 } 941 942 CSR_WRITE_4(sc, phy_reg, data); 943 return(0); 944 } 945 946 frame.mii_phyaddr = phy; 947 frame.mii_regaddr = reg; 948 frame.mii_data = data; 949 950 if (sc->dc_type == DC_TYPE_98713) { 951 phy_reg = CSR_READ_4(sc, DC_NETCFG); 952 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 953 } 954 dc_mii_writereg(sc, &frame); 955 if (sc->dc_type == DC_TYPE_98713) 956 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 957 958 return(0); 959 } 960 961 static void 962 dc_miibus_statchg(device_t dev) 963 { 964 struct dc_softc *sc; 965 struct mii_data *mii; 966 struct ifmedia *ifm; 967 968 sc = device_get_softc(dev); 969 if (DC_IS_ADMTEK(sc)) 970 return; 971 972 mii = device_get_softc(sc->dc_miibus); 973 ifm = &mii->mii_media; 974 if (DC_IS_DAVICOM(sc) && 975 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 976 dc_setcfg(sc, ifm->ifm_media); 977 sc->dc_if_media = ifm->ifm_media; 978 } else { 979 dc_setcfg(sc, mii->mii_media_active); 980 sc->dc_if_media = mii->mii_media_active; 981 } 982 983 return; 984 } 985 986 /* 987 * Special support for DM9102A cards with HomePNA PHYs. 
Note: 988 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 989 * to be impossible to talk to the management interface of the DM9801 990 * PHY (its MDIO pin is not connected to anything). Consequently, 991 * the driver has to just 'know' about the additional mode and deal 992 * with it itself. *sigh* 993 */ 994 static void 995 dc_miibus_mediainit(device_t dev) 996 { 997 struct dc_softc *sc; 998 struct mii_data *mii; 999 struct ifmedia *ifm; 1000 int rev; 1001 1002 rev = pci_get_revid(dev); 1003 1004 sc = device_get_softc(dev); 1005 mii = device_get_softc(sc->dc_miibus); 1006 ifm = &mii->mii_media; 1007 1008 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1009 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 1010 1011 return; 1012 } 1013 1014 #define DC_BITS_512 9 1015 #define DC_BITS_128 7 1016 #define DC_BITS_64 6 1017 1018 static u_int32_t 1019 dc_crc_mask(struct dc_softc *sc) 1020 { 1021 /* 1022 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1023 * chips is only 128 bits wide. 1024 */ 1025 if (sc->dc_flags & DC_128BIT_HASH) 1026 return ((1 << DC_BITS_128) - 1); 1027 1028 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1029 if (sc->dc_flags & DC_64BIT_HASH) 1030 return ((1 << DC_BITS_64) - 1); 1031 1032 return ((1 << DC_BITS_512) - 1); 1033 } 1034 1035 /* 1036 * 21143-style RX filter setup routine. Filter programming is done by 1037 * downloading a special setup frame into the TX engine. 21143, Macronix, 1038 * PNIC, PNIC II and Davicom chips are programmed this way. 1039 * 1040 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1041 * address (our node address) and a 512-bit hash filter for multicast 1042 * frames. We also sneak the broadcast address into the hash filter since 1043 * we need that too. 1044 */ 1045 void 1046 dc_setfilt_21143(struct dc_softc *sc) 1047 { 1048 struct dc_desc *sframe; 1049 u_int32_t h, crc_mask, *sp; 1050 struct ifmultiaddr *ifma; 1051 struct ifnet *ifp; 1052 int i; 1053 1054 ifp = &sc->arpcom.ac_if; 1055 1056 i = sc->dc_cdata.dc_tx_prod; 1057 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1058 sc->dc_cdata.dc_tx_cnt++; 1059 sframe = &sc->dc_ldata->dc_tx_list[i]; 1060 sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; 1061 bzero((char *)sp, DC_SFRAME_LEN); 1062 1063 sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); 1064 sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | 1065 DC_FILTER_HASHPERF | DC_TXCTL_FINT; 1066 1067 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; 1068 1069 /* If we want promiscuous mode, set the allframes bit. 
*/ 1070 if (ifp->if_flags & IFF_PROMISC) 1071 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1072 else 1073 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1074 1075 if (ifp->if_flags & IFF_ALLMULTI) 1076 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1077 else 1078 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1079 1080 crc_mask = dc_crc_mask(sc); 1081 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1082 if (ifma->ifma_addr->sa_family != AF_LINK) 1083 continue; 1084 h = ether_crc32_le( 1085 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1086 ETHER_ADDR_LEN) & crc_mask; 1087 sp[h >> 4] |= 1 << (h & 0xF); 1088 } 1089 1090 if (ifp->if_flags & IFF_BROADCAST) { 1091 h = ether_crc32_le(ifp->if_broadcastaddr, 1092 ETHER_ADDR_LEN) & crc_mask; 1093 sp[h >> 4] |= 1 << (h & 0xF); 1094 } 1095 1096 /* Set our MAC address */ 1097 sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; 1098 sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; 1099 sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; 1100 1101 sframe->dc_status = DC_TXSTAT_OWN; 1102 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1103 1104 /* 1105 * The PNIC takes an exceedingly long time to process its 1106 * setup frame; wait 10ms after posting the setup frame 1107 * before proceeding, just so it has time to swallow its 1108 * medicine. 1109 */ 1110 DELAY(10000); 1111 1112 ifp->if_timer = 5; 1113 1114 return; 1115 } 1116 1117 void 1118 dc_setfilt_admtek(struct dc_softc *sc) 1119 { 1120 struct ifnet *ifp; 1121 int h = 0; 1122 u_int32_t crc_mask; 1123 u_int32_t hashes[2] = { 0, 0 }; 1124 struct ifmultiaddr *ifma; 1125 1126 ifp = &sc->arpcom.ac_if; 1127 1128 /* Init our MAC address */ 1129 CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1130 CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1131 1132 /* If we want promiscuous mode, set the allframes bit. */ 1133 if (ifp->if_flags & IFF_PROMISC) 1134 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1135 else 1136 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1137 1138 if (ifp->if_flags & IFF_ALLMULTI) 1139 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1140 else 1141 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1142 1143 /* first, zot all the existing hash bits */ 1144 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1145 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1146 1147 /* 1148 * If we're already in promisc or allmulti mode, we 1149 * don't have to bother programming the multicast filter. 
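 *
 * Otherwise the 64-bit hash table split across MAR0/MAR1 is programmed
 * below.  Centaur-class parts (AN985 and friends) index it with the
 * little-endian CRC of the address masked by dc_crc_mask(), while the
 * AL981 uses the top six bits of the big-endian CRC, the same scheme
 * used by the ASIX filter.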
1150 */ 1151 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1152 return; 1153 1154 /* now program new ones */ 1155 if (DC_IS_CENTAUR(sc)) 1156 crc_mask = dc_crc_mask(sc); 1157 else 1158 crc_mask = 0x3f; 1159 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1160 if (ifma->ifma_addr->sa_family != AF_LINK) 1161 continue; 1162 if (DC_IS_CENTAUR(sc)) { 1163 h = ether_crc32_le( 1164 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1165 ETHER_ADDR_LEN) & crc_mask; 1166 } else { 1167 h = ether_crc32_be( 1168 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1169 ETHER_ADDR_LEN); 1170 h = (h >> 26) & crc_mask; 1171 } 1172 if (h < 32) 1173 hashes[0] |= (1 << h); 1174 else 1175 hashes[1] |= (1 << (h - 32)); 1176 } 1177 1178 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1179 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1180 1181 return; 1182 } 1183 1184 void 1185 dc_setfilt_asix(struct dc_softc *sc) 1186 { 1187 struct ifnet *ifp; 1188 int h = 0; 1189 u_int32_t hashes[2] = { 0, 0 }; 1190 struct ifmultiaddr *ifma; 1191 1192 ifp = &sc->arpcom.ac_if; 1193 1194 /* Init our MAC address */ 1195 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1196 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1197 *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1198 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1199 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1200 *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1201 1202 /* If we want promiscuous mode, set the allframes bit. */ 1203 if (ifp->if_flags & IFF_PROMISC) 1204 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1205 else 1206 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1207 1208 if (ifp->if_flags & IFF_ALLMULTI) 1209 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1210 else 1211 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1212 1213 /* 1214 * The ASIX chip has a special bit to enable reception 1215 * of broadcast frames. 1216 */ 1217 if (ifp->if_flags & IFF_BROADCAST) 1218 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1219 else 1220 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1221 1222 /* first, zot all the existing hash bits */ 1223 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1224 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1225 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1226 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1227 1228 /* 1229 * If we're already in promisc or allmulti mode, we 1230 * don't have to bother programming the multicast filter. 
 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN);
		h = (h >> 26) & 0x3f;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

	return;
}

void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
	sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
	    DC_FILTER_HASHPERF | DC_TXCTL_FINT;

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_xircom(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_xircom(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	/* Set our MAC address */
	sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
	sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
	sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = DC_TXSTAT_OWN;
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}

static void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
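 * dc_setcfg() below does this by clearing DC_NETCFG_TX_ON/RX_ON and
 * polling DC_ISR until the transmit state machine reports idle and the
 * receive state machine reports stopped (or waiting) before it touches
 * the speed, PCS, scrambler and duplex bits, then turns the MACs back
 * on if they were running.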
1346 */ 1347 static void 1348 dc_setcfg(struct dc_softc *sc, int media) 1349 { 1350 int i, restart = 0; 1351 u_int32_t isr; 1352 1353 if (IFM_SUBTYPE(media) == IFM_NONE) 1354 return; 1355 1356 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) { 1357 restart = 1; 1358 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); 1359 1360 for (i = 0; i < DC_TIMEOUT; i++) { 1361 isr = CSR_READ_4(sc, DC_ISR); 1362 if ((isr & DC_ISR_TX_IDLE) && 1363 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1364 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1365 break; 1366 DELAY(10); 1367 } 1368 1369 if (i == DC_TIMEOUT) { 1370 if_printf(&sc->arpcom.ac_if, 1371 "failed to force tx and rx to idle state\n"); 1372 } 1373 } 1374 1375 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1376 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1377 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1378 if (sc->dc_pmode == DC_PMODE_MII) { 1379 int watchdogreg; 1380 1381 if (DC_IS_INTEL(sc)) { 1382 /* there's a write enable bit here that reads as 1 */ 1383 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1384 watchdogreg &= ~DC_WDOG_CTLWREN; 1385 watchdogreg |= DC_WDOG_JABBERDIS; 1386 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1387 } else { 1388 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1389 } 1390 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1391 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1392 if (sc->dc_type == DC_TYPE_98713) 1393 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1394 DC_NETCFG_SCRAMBLER)); 1395 if (!DC_IS_DAVICOM(sc)) 1396 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1397 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1398 if (DC_IS_INTEL(sc)) 1399 dc_apply_fixup(sc, IFM_AUTO); 1400 } else { 1401 if (DC_IS_PNIC(sc)) { 1402 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1403 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1404 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1405 } 1406 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1407 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1408 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1409 if (DC_IS_INTEL(sc)) 1410 dc_apply_fixup(sc, 1411 (media & IFM_GMASK) == IFM_FDX ? 
1412 IFM_100_TX|IFM_FDX : IFM_100_TX); 1413 } 1414 } 1415 1416 if (IFM_SUBTYPE(media) == IFM_10_T) { 1417 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1418 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1419 if (sc->dc_pmode == DC_PMODE_MII) { 1420 int watchdogreg; 1421 1422 /* there's a write enable bit here that reads as 1 */ 1423 if (DC_IS_INTEL(sc)) { 1424 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1425 watchdogreg &= ~DC_WDOG_CTLWREN; 1426 watchdogreg |= DC_WDOG_JABBERDIS; 1427 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1428 } else { 1429 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1430 } 1431 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1432 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1433 if (sc->dc_type == DC_TYPE_98713) 1434 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1435 if (!DC_IS_DAVICOM(sc)) 1436 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1437 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1438 if (DC_IS_INTEL(sc)) 1439 dc_apply_fixup(sc, IFM_AUTO); 1440 } else { 1441 if (DC_IS_PNIC(sc)) { 1442 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1443 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1444 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1445 } 1446 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1447 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1448 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1449 if (DC_IS_INTEL(sc)) { 1450 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1451 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1452 if ((media & IFM_GMASK) == IFM_FDX) 1453 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1454 else 1455 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1456 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1457 DC_CLRBIT(sc, DC_10BTCTRL, 1458 DC_TCTL_AUTONEGENBL); 1459 dc_apply_fixup(sc, 1460 (media & IFM_GMASK) == IFM_FDX ? 1461 IFM_10_T|IFM_FDX : IFM_10_T); 1462 DELAY(20000); 1463 } 1464 } 1465 } 1466 1467 /* 1468 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1469 * PHY and we want HomePNA mode, set the portsel bit to turn 1470 * on the external MII port. 1471 */ 1472 if (DC_IS_DAVICOM(sc)) { 1473 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1474 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1475 sc->dc_link = 1; 1476 } else { 1477 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1478 } 1479 } 1480 1481 if ((media & IFM_GMASK) == IFM_FDX) { 1482 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1483 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1484 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1485 } else { 1486 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1487 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1488 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1489 } 1490 1491 if (restart) 1492 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON); 1493 1494 return; 1495 } 1496 1497 static void 1498 dc_reset(struct dc_softc *sc) 1499 { 1500 int i; 1501 1502 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1503 1504 for (i = 0; i < DC_TIMEOUT; i++) { 1505 DELAY(10); 1506 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1507 break; 1508 } 1509 1510 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) || 1511 DC_IS_CONEXANT(sc)) { 1512 DELAY(10000); 1513 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1514 i = 0; 1515 } 1516 1517 if (i == DC_TIMEOUT) 1518 if_printf(&sc->arpcom.ac_if, "reset never completed!\n"); 1519 1520 /* Wait a little while for the chip to get its brains in order. */ 1521 DELAY(1000); 1522 1523 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1524 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1525 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1526 1527 /* 1528 * Bring the SIA out of reset. 
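 * (The SIA is the 21143's on-chip serial interface block, which drives
 * the 10baseT/AUI/BNC ports and the internal NWAY logic.)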
In some cases, it looks 1529 * like failing to unreset the SIA soon enough gets it 1530 * into a state where it will never come out of reset 1531 * until we reset the whole chip again. 1532 */ 1533 if (DC_IS_INTEL(sc)) { 1534 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1535 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1536 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1537 } 1538 1539 return; 1540 } 1541 1542 static const struct dc_type * 1543 dc_devtype(device_t dev) 1544 { 1545 const struct dc_type *t; 1546 u_int32_t rev; 1547 1548 t = dc_devs; 1549 1550 while(t->dc_name != NULL) { 1551 if ((pci_get_vendor(dev) == t->dc_vid) && 1552 (pci_get_device(dev) == t->dc_did)) { 1553 /* Check the PCI revision */ 1554 rev = pci_get_revid(dev); 1555 if (t->dc_did == DC_DEVICEID_98713 && 1556 rev >= DC_REVISION_98713A) 1557 t++; 1558 if (t->dc_did == DC_DEVICEID_98713_CP && 1559 rev >= DC_REVISION_98713A) 1560 t++; 1561 if (t->dc_did == DC_DEVICEID_987x5 && 1562 rev >= DC_REVISION_98715AEC_C) 1563 t++; 1564 if (t->dc_did == DC_DEVICEID_987x5 && 1565 rev >= DC_REVISION_98725) 1566 t++; 1567 if (t->dc_did == DC_DEVICEID_AX88140A && 1568 rev >= DC_REVISION_88141) 1569 t++; 1570 if (t->dc_did == DC_DEVICEID_82C168 && 1571 rev >= DC_REVISION_82C169) 1572 t++; 1573 if (t->dc_did == DC_DEVICEID_DM9102 && 1574 rev >= DC_REVISION_DM9102A) 1575 t++; 1576 return(t); 1577 } 1578 t++; 1579 } 1580 1581 return(NULL); 1582 } 1583 1584 /* 1585 * Probe for a 21143 or clone chip. Check the PCI vendor and device 1586 * IDs against our list and return a device name if we find a match. 1587 * We do a little bit of extra work to identify the exact type of 1588 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, 1589 * but different revision IDs. The same is true for 98715/98715A 1590 * chips and the 98725, as well as the ASIX and ADMtek chips. In some 1591 * cases, the exact chip revision affects driver behavior. 1592 */ 1593 static int 1594 dc_probe(device_t dev) 1595 { 1596 const struct dc_type *t; 1597 1598 t = dc_devtype(dev); 1599 if (t != NULL) { 1600 struct dc_softc *sc = device_get_softc(dev); 1601 1602 /* Need this info to decide on a chip type. */ 1603 sc->dc_info = t; 1604 device_set_desc(dev, t->dc_name); 1605 return(0); 1606 } 1607 1608 return(ENXIO); 1609 } 1610 1611 static void 1612 dc_acpi(device_t dev) 1613 { 1614 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1615 uint32_t iobase, membase, irq; 1616 struct dc_softc *sc; 1617 1618 /* Save important PCI config data. */ 1619 iobase = pci_read_config(dev, DC_PCI_CFBIO, 4); 1620 membase = pci_read_config(dev, DC_PCI_CFBMA, 4); 1621 irq = pci_read_config(dev, DC_PCI_CFIT, 4); 1622 1623 sc = device_get_softc(dev); 1624 /* Reset the power state. */ 1625 if_printf(&sc->arpcom.ac_if, 1626 "chip is in D%d power mode " 1627 "-- setting to D0\n", pci_get_powerstate(dev)); 1628 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1629 1630 /* Restore PCI config data. 
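 * (Coming back to D0 from a low-power state can reset the chip's PCI
 * configuration, so the BARs and interrupt line saved above are
 * written back here.)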
*/ 1631 pci_write_config(dev, DC_PCI_CFBIO, iobase, 4); 1632 pci_write_config(dev, DC_PCI_CFBMA, membase, 4); 1633 pci_write_config(dev, DC_PCI_CFIT, irq, 4); 1634 } 1635 } 1636 1637 static void 1638 dc_apply_fixup(struct dc_softc *sc, int media) 1639 { 1640 struct dc_mediainfo *m; 1641 u_int8_t *p; 1642 int i; 1643 u_int32_t reg; 1644 1645 m = sc->dc_mi; 1646 1647 while (m != NULL) { 1648 if (m->dc_media == media) 1649 break; 1650 m = m->dc_next; 1651 } 1652 1653 if (m == NULL) 1654 return; 1655 1656 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1657 reg = (p[0] | (p[1] << 8)) << 16; 1658 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1659 } 1660 1661 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1662 reg = (p[0] | (p[1] << 8)) << 16; 1663 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1664 } 1665 1666 return; 1667 } 1668 1669 static void 1670 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) 1671 { 1672 struct dc_mediainfo *m; 1673 1674 m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO); 1675 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT){ 1676 case DC_SIA_CODE_10BT: 1677 m->dc_media = IFM_10_T; 1678 break; 1679 1680 case DC_SIA_CODE_10BT_FDX: 1681 m->dc_media = IFM_10_T|IFM_FDX; 1682 break; 1683 1684 case DC_SIA_CODE_10B2: 1685 m->dc_media = IFM_10_2; 1686 break; 1687 1688 case DC_SIA_CODE_10B5: 1689 m->dc_media = IFM_10_5; 1690 break; 1691 } 1692 if (l->dc_sia_code & DC_SIA_CODE_EXT){ 1693 m->dc_gp_len = 2; 1694 m->dc_gp_ptr = 1695 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl; 1696 } else { 1697 m->dc_gp_len = 2; 1698 m->dc_gp_ptr = 1699 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl; 1700 } 1701 1702 m->dc_next = sc->dc_mi; 1703 sc->dc_mi = m; 1704 1705 sc->dc_pmode = DC_PMODE_SIA; 1706 1707 return; 1708 } 1709 1710 static void 1711 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) 1712 { 1713 struct dc_mediainfo *m; 1714 1715 m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO); 1716 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1717 m->dc_media = IFM_100_TX; 1718 1719 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1720 m->dc_media = IFM_100_TX|IFM_FDX; 1721 1722 m->dc_gp_len = 2; 1723 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1724 1725 m->dc_next = sc->dc_mi; 1726 sc->dc_mi = m; 1727 1728 sc->dc_pmode = DC_PMODE_SYM; 1729 1730 return; 1731 } 1732 1733 static void 1734 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) 1735 { 1736 u_int8_t *p; 1737 struct dc_mediainfo *m; 1738 1739 m = kmalloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_INTWAIT | M_ZERO); 1740 /* We abuse IFM_AUTO to represent MII. 
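 * The MII leaf stores the length of a general-purpose-port sequence,
 * the sequence itself and then a reset sequence; dc_apply_fixup()
 * later replays both through the DC_WATCHDOG register to wiggle the
 * board's GPIO pins before the PHY is probed.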
*/ 1741 m->dc_media = IFM_AUTO; 1742 m->dc_gp_len = l->dc_gpr_len; 1743 1744 p = (u_int8_t *)l; 1745 p += sizeof(struct dc_eblock_mii); 1746 m->dc_gp_ptr = p; 1747 p += 2 * l->dc_gpr_len; 1748 m->dc_reset_len = *p; 1749 p++; 1750 m->dc_reset_ptr = p; 1751 1752 m->dc_next = sc->dc_mi; 1753 sc->dc_mi = m; 1754 1755 return; 1756 } 1757 1758 static void 1759 dc_read_srom(struct dc_softc *sc, int bits) 1760 { 1761 int size; 1762 1763 size = 2 << bits; 1764 sc->dc_srom = kmalloc(size, M_DEVBUF, M_INTWAIT); 1765 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1766 } 1767 1768 static void 1769 dc_parse_21143_srom(struct dc_softc *sc) 1770 { 1771 struct dc_leaf_hdr *lhdr; 1772 struct dc_eblock_hdr *hdr; 1773 int i, loff; 1774 char *ptr; 1775 int have_mii; 1776 1777 have_mii = 0; 1778 loff = sc->dc_srom[27]; 1779 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1780 1781 ptr = (char *)lhdr; 1782 ptr += sizeof(struct dc_leaf_hdr) - 1; 1783 /* 1784 * Look if we got a MII media block. 1785 */ 1786 for (i = 0; i < lhdr->dc_mcnt; i++) { 1787 hdr = (struct dc_eblock_hdr *)ptr; 1788 if (hdr->dc_type == DC_EBLOCK_MII) 1789 have_mii++; 1790 1791 ptr += (hdr->dc_len & 0x7F); 1792 ptr++; 1793 } 1794 1795 /* 1796 * Do the same thing again. Only use SIA and SYM media 1797 * blocks if no MII media block is available. 1798 */ 1799 ptr = (char *)lhdr; 1800 ptr += sizeof(struct dc_leaf_hdr) - 1; 1801 for (i = 0; i < lhdr->dc_mcnt; i++) { 1802 hdr = (struct dc_eblock_hdr *)ptr; 1803 switch(hdr->dc_type) { 1804 case DC_EBLOCK_MII: 1805 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1806 break; 1807 case DC_EBLOCK_SIA: 1808 if (! have_mii) 1809 dc_decode_leaf_sia(sc, 1810 (struct dc_eblock_sia *)hdr); 1811 break; 1812 case DC_EBLOCK_SYM: 1813 if (! have_mii) 1814 dc_decode_leaf_sym(sc, 1815 (struct dc_eblock_sym *)hdr); 1816 break; 1817 default: 1818 /* Don't care. Yet. */ 1819 break; 1820 } 1821 ptr += (hdr->dc_len & 0x7F); 1822 ptr++; 1823 } 1824 1825 return; 1826 } 1827 1828 /* 1829 * Attach the interface. Allocate softc structures, do ifmedia 1830 * setup and ethernet/BPF attach. 1831 */ 1832 static int 1833 dc_attach(device_t dev) 1834 { 1835 int tmp = 0; 1836 u_char eaddr[ETHER_ADDR_LEN]; 1837 u_int32_t command; 1838 struct dc_softc *sc; 1839 struct ifnet *ifp; 1840 u_int32_t revision; 1841 int error = 0, rid, mac_offset; 1842 uint8_t *mac; 1843 1844 sc = device_get_softc(dev); 1845 callout_init(&sc->dc_stat_timer); 1846 1847 ifp = &sc->arpcom.ac_if; 1848 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1849 1850 /* 1851 * Handle power management nonsense. 1852 */ 1853 dc_acpi(dev); 1854 1855 /* 1856 * Map control/status registers. 
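 * DC_RES/DC_RID select either the I/O or the memory BAR, depending on
 * whether DC_USEIOSPACE was defined above.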
1857 */ 1858 pci_enable_busmaster(dev); 1859 1860 rid = DC_RID; 1861 sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE); 1862 1863 if (sc->dc_res == NULL) { 1864 device_printf(dev, "couldn't map ports/memory\n"); 1865 error = ENXIO; 1866 goto fail; 1867 } 1868 1869 sc->dc_btag = rman_get_bustag(sc->dc_res); 1870 sc->dc_bhandle = rman_get_bushandle(sc->dc_res); 1871 1872 /* Allocate interrupt */ 1873 rid = 0; 1874 sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1875 RF_SHAREABLE | RF_ACTIVE); 1876 1877 if (sc->dc_irq == NULL) { 1878 device_printf(dev, "couldn't map interrupt\n"); 1879 error = ENXIO; 1880 goto fail; 1881 } 1882 1883 revision = pci_get_revid(dev); 1884 1885 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ 1886 if (sc->dc_info->dc_did != DC_DEVICEID_82C168 && 1887 sc->dc_info->dc_did != DC_DEVICEID_X3201) 1888 dc_eeprom_width(sc); 1889 1890 switch(sc->dc_info->dc_did) { 1891 case DC_DEVICEID_21143: 1892 sc->dc_type = DC_TYPE_21143; 1893 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 1894 sc->dc_flags |= DC_REDUCED_MII_POLL; 1895 /* Save EEPROM contents so we can parse them later. */ 1896 dc_read_srom(sc, sc->dc_romwidth); 1897 break; 1898 case DC_DEVICEID_DM9009: 1899 case DC_DEVICEID_DM9100: 1900 case DC_DEVICEID_DM9102: 1901 sc->dc_type = DC_TYPE_DM9102; 1902 sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS; 1903 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD; 1904 sc->dc_flags |= DC_TX_ALIGN; 1905 sc->dc_pmode = DC_PMODE_MII; 1906 /* Increase the latency timer value. */ 1907 command = pci_read_config(dev, DC_PCI_CFLT, 4); 1908 command &= 0xFFFF00FF; 1909 command |= 0x00008000; 1910 pci_write_config(dev, DC_PCI_CFLT, command, 4); 1911 break; 1912 case DC_DEVICEID_AL981: 1913 sc->dc_type = DC_TYPE_AL981; 1914 sc->dc_flags |= DC_TX_USE_TX_INTR; 1915 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1916 sc->dc_pmode = DC_PMODE_MII; 1917 dc_read_srom(sc, sc->dc_romwidth); 1918 break; 1919 case DC_DEVICEID_AN985: 1920 case DC_DEVICEID_ADM9511: 1921 case DC_DEVICEID_ADM9513: 1922 case DC_DEVICEID_FA511: 1923 case DC_DEVICEID_EN2242: 1924 case DC_DEVICEID_3CSOHOB: 1925 sc->dc_type = DC_TYPE_AN985; 1926 sc->dc_flags |= DC_64BIT_HASH; 1927 sc->dc_flags |= DC_TX_USE_TX_INTR; 1928 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1929 sc->dc_pmode = DC_PMODE_MII; 1930 break; 1931 case DC_DEVICEID_98713: 1932 case DC_DEVICEID_98713_CP: 1933 if (revision < DC_REVISION_98713A) { 1934 sc->dc_type = DC_TYPE_98713; 1935 } 1936 if (revision >= DC_REVISION_98713A) { 1937 sc->dc_type = DC_TYPE_98713A; 1938 sc->dc_flags |= DC_21143_NWAY; 1939 } 1940 sc->dc_flags |= DC_REDUCED_MII_POLL; 1941 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 1942 break; 1943 case DC_DEVICEID_987x5: 1944 case DC_DEVICEID_EN1217: 1945 /* 1946 * Macronix MX98715AEC-C/D/E parts have only a 1947 * 128-bit hash table. We need to deal with these 1948 * in the same manner as the PNIC II so that we 1949 * get the right number of bits out of the 1950 * CRC routine. 
1951 */ 1952 if (revision >= DC_REVISION_98715AEC_C && 1953 revision < DC_REVISION_98725) 1954 sc->dc_flags |= DC_128BIT_HASH; 1955 sc->dc_type = DC_TYPE_987x5; 1956 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 1957 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 1958 break; 1959 case DC_DEVICEID_98727: 1960 sc->dc_type = DC_TYPE_987x5; 1961 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 1962 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 1963 break; 1964 case DC_DEVICEID_82C115: 1965 sc->dc_type = DC_TYPE_PNICII; 1966 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH; 1967 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 1968 break; 1969 case DC_DEVICEID_82C168: 1970 sc->dc_type = DC_TYPE_PNIC; 1971 sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS; 1972 sc->dc_flags |= DC_PNIC_RX_BUG_WAR; 1973 sc->dc_pnic_rx_buf = kmalloc(DC_RXLEN * 5, M_DEVBUF, M_WAITOK); 1974 if (revision < DC_REVISION_82C169) 1975 sc->dc_pmode = DC_PMODE_SYM; 1976 break; 1977 case DC_DEVICEID_AX88140A: 1978 sc->dc_type = DC_TYPE_ASIX; 1979 sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG; 1980 sc->dc_flags |= DC_REDUCED_MII_POLL; 1981 sc->dc_pmode = DC_PMODE_MII; 1982 break; 1983 case DC_DEVICEID_RS7112: 1984 sc->dc_type = DC_TYPE_CONEXANT; 1985 sc->dc_flags |= DC_TX_INTR_ALWAYS; 1986 sc->dc_flags |= DC_REDUCED_MII_POLL; 1987 sc->dc_pmode = DC_PMODE_MII; 1988 dc_read_srom(sc, sc->dc_romwidth); 1989 break; 1990 case DC_DEVICEID_X3201: 1991 sc->dc_type = DC_TYPE_XIRCOM; 1992 sc->dc_flags |= (DC_TX_INTR_ALWAYS | DC_TX_COALESCE | 1993 DC_TX_ALIGN); 1994 /* 1995 * We don't actually need to coalesce, but we're doing 1996 * it to obtain a double word aligned buffer. 1997 * The DC_TX_COALESCE flag is required. 1998 */ 1999 sc->dc_pmode = DC_PMODE_MII; 2000 break; 2001 default: 2002 device_printf(dev, "unknown device: %x\n", sc->dc_info->dc_did); 2003 break; 2004 } 2005 2006 /* Save the cache line size. */ 2007 if (DC_IS_DAVICOM(sc)) 2008 sc->dc_cachesize = 0; 2009 else 2010 sc->dc_cachesize = pci_read_config(dev, 2011 DC_PCI_CFLT, 4) & 0xFF; 2012 2013 /* Reset the adapter. */ 2014 dc_reset(sc); 2015 2016 /* Take 21143 out of snooze mode */ 2017 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 2018 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2019 command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE); 2020 pci_write_config(dev, DC_PCI_CFDD, command, 4); 2021 } 2022 2023 /* 2024 * Try to learn something about the supported media. 2025 * We know that ASIX and ADMtek and Davicom devices 2026 * will *always* be using MII media, so that's a no-brainer. 2027 * The tricky ones are the Macronix/PNIC II and the 2028 * Intel 21143. 2029 */ 2030 if (DC_IS_INTEL(sc)) 2031 dc_parse_21143_srom(sc); 2032 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2033 if (sc->dc_type == DC_TYPE_98713) 2034 sc->dc_pmode = DC_PMODE_MII; 2035 else 2036 sc->dc_pmode = DC_PMODE_SYM; 2037 } else if (!sc->dc_pmode) 2038 sc->dc_pmode = DC_PMODE_MII; 2039 2040 /* 2041 * Get station address from the EEPROM. 
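 * Where the address lives differs per chip: the Macronix/PNIC II parts
 * store an offset to it at DC_EE_NODEADDR_OFFSET, the PNIC stores it
 * byte-swapped, the ADMtek parts expose it directly in PAR0/PAR1, the
 * Conexant keeps it in the SROM image, and the Xircom supplies it via
 * the CardBus CIS.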
2042 */ 2043 switch(sc->dc_type) { 2044 case DC_TYPE_98713: 2045 case DC_TYPE_98713A: 2046 case DC_TYPE_987x5: 2047 case DC_TYPE_PNICII: 2048 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2049 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2050 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2051 break; 2052 case DC_TYPE_PNIC: 2053 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2054 break; 2055 case DC_TYPE_DM9102: 2056 case DC_TYPE_21143: 2057 case DC_TYPE_ASIX: 2058 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2059 break; 2060 case DC_TYPE_AL981: 2061 case DC_TYPE_AN985: 2062 *(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc,DC_AL_PAR0); 2063 *(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc,DC_AL_PAR1); 2064 break; 2065 case DC_TYPE_CONEXANT: 2066 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 6); 2067 break; 2068 case DC_TYPE_XIRCOM: 2069 /* The MAC comes from the CIS */ 2070 mac = pci_get_ether(dev); 2071 if (!mac) { 2072 device_printf(dev, "No station address in CIS!\n"); 2073 error = ENXIO; 2074 goto fail; 2075 } 2076 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2077 break; 2078 default: 2079 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2080 break; 2081 } 2082 2083 sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF, 2084 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 2085 2086 if (sc->dc_ldata == NULL) { 2087 device_printf(dev, "no memory for list buffers!\n"); 2088 error = ENXIO; 2089 goto fail; 2090 } 2091 2092 bzero(sc->dc_ldata, sizeof(struct dc_list_data)); 2093 2094 ifp->if_softc = sc; 2095 ifp->if_mtu = ETHERMTU; 2096 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2097 ifp->if_ioctl = dc_ioctl; 2098 ifp->if_start = dc_start; 2099 #ifdef DEVICE_POLLING 2100 ifp->if_poll = dc_poll; 2101 #endif 2102 ifp->if_watchdog = dc_watchdog; 2103 ifp->if_init = dc_init; 2104 ifp->if_baudrate = 10000000; 2105 ifq_set_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1); 2106 ifq_set_ready(&ifp->if_snd); 2107 2108 /* 2109 * Do MII setup. If this is a 21143, check for a PHY on the 2110 * MII bus after applying any necessary fixups to twiddle the 2111 * GPIO bits. If we don't end up finding a PHY, restore the 2112 * old selection (SIA only or SIA/SYM) and attach the dcphy 2113 * driver instead. 2114 */ 2115 if (DC_IS_INTEL(sc)) { 2116 dc_apply_fixup(sc, IFM_AUTO); 2117 tmp = sc->dc_pmode; 2118 sc->dc_pmode = DC_PMODE_MII; 2119 } 2120 2121 /* 2122 * Setup General Purpose port mode and data so the tulip can talk 2123 * to the MII. This needs to be done before mii_phy_probe so that 2124 * we can actually see them. 2125 */ 2126 if (DC_IS_XIRCOM(sc)) { 2127 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2128 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2129 DELAY(10); 2130 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2131 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2132 DELAY(10); 2133 } 2134 2135 error = mii_phy_probe(dev, &sc->dc_miibus, 2136 dc_ifmedia_upd, dc_ifmedia_sts); 2137 2138 if (error && DC_IS_INTEL(sc)) { 2139 sc->dc_pmode = tmp; 2140 if (sc->dc_pmode != DC_PMODE_SIA) 2141 sc->dc_pmode = DC_PMODE_SYM; 2142 sc->dc_flags |= DC_21143_NWAY; 2143 mii_phy_probe(dev, &sc->dc_miibus, 2144 dc_ifmedia_upd, dc_ifmedia_sts); 2145 /* 2146 * For non-MII cards, we need to have the 21143 2147 * drive the LEDs. Except there are some systems 2148 * like the NEC VersaPro NoteBook PC which have no 2149 * LEDs, and twiddling these bits has adverse effects 2150 * on them. (I.e. you suddenly can't get a link.) 
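 * The test below keys on that machine's PCI subsystem ID
 * (0x80281033) and leaves DC_TULIP_LEDS unset for it.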
2151 */ 2152 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) 2153 sc->dc_flags |= DC_TULIP_LEDS; 2154 error = 0; 2155 } 2156 2157 if (error) { 2158 device_printf(dev, "MII without any PHY!\n"); 2159 error = ENXIO; 2160 goto fail; 2161 } 2162 2163 /* 2164 * Call MI attach routine. 2165 */ 2166 ether_ifattach(ifp, eaddr, NULL); 2167 2168 if (DC_IS_ADMTEK(sc)) { 2169 /* 2170 * Set automatic TX underrun recovery for the ADMtek chips 2171 */ 2172 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 2173 } 2174 2175 /* 2176 * Tell the upper layer(s) we support long frames. 2177 */ 2178 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2179 2180 error = bus_setup_intr(dev, sc->dc_irq, INTR_NETSAFE, 2181 dc_intr, sc, &sc->dc_intrhand, 2182 ifp->if_serializer); 2183 if (error) { 2184 ether_ifdetach(ifp); 2185 device_printf(dev, "couldn't set up irq\n"); 2186 goto fail; 2187 } 2188 2189 return(0); 2190 2191 fail: 2192 dc_detach(dev); 2193 return(error); 2194 } 2195 2196 static int 2197 dc_detach(device_t dev) 2198 { 2199 struct dc_softc *sc = device_get_softc(dev); 2200 struct ifnet *ifp = &sc->arpcom.ac_if; 2201 struct dc_mediainfo *m; 2202 2203 if (device_is_attached(dev)) { 2204 lwkt_serialize_enter(ifp->if_serializer); 2205 dc_stop(sc); 2206 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2207 lwkt_serialize_exit(ifp->if_serializer); 2208 2209 ether_ifdetach(ifp); 2210 } 2211 2212 if (sc->dc_miibus) 2213 device_delete_child(dev, sc->dc_miibus); 2214 bus_generic_detach(dev); 2215 2216 if (sc->dc_irq) 2217 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2218 if (sc->dc_res) 2219 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2220 2221 if (sc->dc_ldata) 2222 contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF); 2223 if (sc->dc_pnic_rx_buf != NULL) 2224 kfree(sc->dc_pnic_rx_buf, M_DEVBUF); 2225 2226 while (sc->dc_mi != NULL) { 2227 m = sc->dc_mi->dc_next; 2228 kfree(sc->dc_mi, M_DEVBUF); 2229 sc->dc_mi = m; 2230 } 2231 2232 if (sc->dc_srom) 2233 kfree(sc->dc_srom, M_DEVBUF); 2234 2235 return(0); 2236 } 2237 2238 /* 2239 * Initialize the transmit descriptors. 2240 */ 2241 static int 2242 dc_list_tx_init(struct dc_softc *sc) 2243 { 2244 struct dc_chain_data *cd; 2245 struct dc_list_data *ld; 2246 int i; 2247 2248 cd = &sc->dc_cdata; 2249 ld = sc->dc_ldata; 2250 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2251 if (i == (DC_TX_LIST_CNT - 1)) { 2252 ld->dc_tx_list[i].dc_next = 2253 vtophys(&ld->dc_tx_list[0]); 2254 } else { 2255 ld->dc_tx_list[i].dc_next = 2256 vtophys(&ld->dc_tx_list[i + 1]); 2257 } 2258 cd->dc_tx_chain[i] = NULL; 2259 ld->dc_tx_list[i].dc_data = 0; 2260 ld->dc_tx_list[i].dc_ctl = 0; 2261 } 2262 2263 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2264 2265 return(0); 2266 } 2267 2268 2269 /* 2270 * Initialize the RX descriptors and allocate mbufs for them. Note that 2271 * we arrange the descriptors in a closed ring, so that the last descriptor 2272 * points back to the first. 
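 * As with the TX list above, the ring is linked through each
 * descriptor's dc_next field, which holds the physical (vtophys)
 * address of the next descriptor.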
2273 */ 2274 static int 2275 dc_list_rx_init(struct dc_softc *sc) 2276 { 2277 struct dc_chain_data *cd; 2278 struct dc_list_data *ld; 2279 int i; 2280 2281 cd = &sc->dc_cdata; 2282 ld = sc->dc_ldata; 2283 2284 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2285 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 2286 return(ENOBUFS); 2287 if (i == (DC_RX_LIST_CNT - 1)) { 2288 ld->dc_rx_list[i].dc_next = 2289 vtophys(&ld->dc_rx_list[0]); 2290 } else { 2291 ld->dc_rx_list[i].dc_next = 2292 vtophys(&ld->dc_rx_list[i + 1]); 2293 } 2294 } 2295 2296 cd->dc_rx_prod = 0; 2297 2298 return(0); 2299 } 2300 2301 /* 2302 * Initialize an RX descriptor and attach an MBUF cluster. 2303 */ 2304 static int 2305 dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m) 2306 { 2307 struct mbuf *m_new = NULL; 2308 struct dc_desc *c; 2309 2310 c = &sc->dc_ldata->dc_rx_list[i]; 2311 2312 if (m == NULL) { 2313 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 2314 if (m_new == NULL) 2315 return (ENOBUFS); 2316 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2317 } else { 2318 m_new = m; 2319 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2320 m_new->m_data = m_new->m_ext.ext_buf; 2321 } 2322 2323 m_adj(m_new, sizeof(u_int64_t)); 2324 2325 /* 2326 * If this is a PNIC chip, zero the buffer. This is part 2327 * of the workaround for the receive bug in the 82c168 and 2328 * 82c169 chips. 2329 */ 2330 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2331 bzero((char *)mtod(m_new, char *), m_new->m_len); 2332 2333 sc->dc_cdata.dc_rx_chain[i] = m_new; 2334 c->dc_data = vtophys(mtod(m_new, caddr_t)); 2335 c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; 2336 c->dc_status = DC_RXSTAT_OWN; 2337 2338 return(0); 2339 } 2340 2341 /* 2342 * Grrrrr. 2343 * The PNIC chip has a terrible bug in it that manifests itself during 2344 * periods of heavy activity. The exact mode of failure if difficult to 2345 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2346 * will happen on slow machines. The bug is that sometimes instead of 2347 * uploading one complete frame during reception, it uploads what looks 2348 * like the entire contents of its FIFO memory. The frame we want is at 2349 * the end of the whole mess, but we never know exactly how much data has 2350 * been uploaded, so salvaging the frame is hard. 2351 * 2352 * There is only one way to do it reliably, and it's disgusting. 2353 * Here's what we know: 2354 * 2355 * - We know there will always be somewhere between one and three extra 2356 * descriptors uploaded. 2357 * 2358 * - We know the desired received frame will always be at the end of the 2359 * total data upload. 2360 * 2361 * - We know the size of the desired received frame because it will be 2362 * provided in the length field of the status word in the last descriptor. 2363 * 2364 * Here's what we do: 2365 * 2366 * - When we allocate buffers for the receive ring, we bzero() them. 2367 * This means that we know that the buffer contents should be all 2368 * zeros, except for data uploaded by the chip. 2369 * 2370 * - We also force the PNIC chip to upload frames that include the 2371 * ethernet CRC at the end. 2372 * 2373 * - We gather all of the bogus frame data into a single buffer. 2374 * 2375 * - We then position a pointer at the end of this buffer and scan 2376 * backwards until we encounter the first non-zero byte of data. 2377 * This is the end of the received frame. 
We know we will encounter 2378 * some data at the end of the frame because the CRC will always be 2379 * there, so even if the sender transmits a packet of all zeros, 2380 * we won't be fooled. 2381 * 2382 * - We know the size of the actual received frame, so we subtract 2383 * that value from the current pointer location. This brings us 2384 * to the start of the actual received packet. 2385 * 2386 * - We copy this into an mbuf and pass it on, along with the actual 2387 * frame length. 2388 * 2389 * The performance hit is tremendous, but it beats dropping frames all 2390 * the time. 2391 */ 2392 2393 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) 2394 static void 2395 dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2396 { 2397 struct dc_desc *cur_rx; 2398 struct dc_desc *c = NULL; 2399 struct mbuf *m = NULL; 2400 unsigned char *ptr; 2401 int i, total_len; 2402 u_int32_t rxstat = 0; 2403 2404 i = sc->dc_pnic_rx_bug_save; 2405 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2406 ptr = sc->dc_pnic_rx_buf; 2407 bzero(ptr, DC_RXLEN * 5); 2408 2409 /* Copy all the bytes from the bogus buffers. */ 2410 while (1) { 2411 c = &sc->dc_ldata->dc_rx_list[i]; 2412 rxstat = c->dc_status; 2413 m = sc->dc_cdata.dc_rx_chain[i]; 2414 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2415 ptr += DC_RXLEN; 2416 /* If this is the last buffer, break out. */ 2417 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2418 break; 2419 dc_newbuf(sc, i, m); 2420 DC_INC(i, DC_RX_LIST_CNT); 2421 } 2422 2423 /* Find the length of the actual receive frame. */ 2424 total_len = DC_RXBYTES(rxstat); 2425 2426 /* Scan backwards until we hit a non-zero byte. */ 2427 while(*ptr == 0x00) 2428 ptr--; 2429 2430 /* Round off. */ 2431 if ((uintptr_t)(ptr) & 0x3) 2432 ptr -= 1; 2433 2434 /* Now find the start of the frame. */ 2435 ptr -= total_len; 2436 if (ptr < sc->dc_pnic_rx_buf) 2437 ptr = sc->dc_pnic_rx_buf; 2438 2439 /* 2440 * Now copy the salvaged frame to the last mbuf and fake up 2441 * the status word to make it look like a successful 2442 * frame reception. 2443 */ 2444 dc_newbuf(sc, i, m); 2445 bcopy(ptr, mtod(m, char *), total_len); 2446 cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; 2447 2448 return; 2449 } 2450 2451 /* 2452 * This routine searches the RX ring for dirty descriptors in the 2453 * event that the rxeof routine falls out of sync with the chip's 2454 * current descriptor pointer. This may happen sometimes as a result 2455 * of a "no RX buffer available" condition that happens when the chip 2456 * consumes all of the RX buffers before the driver has a chance to 2457 * process the RX ring. This routine may need to be called more than 2458 * once to bring the driver back in sync with the chip, however we 2459 * should still be getting RX DONE interrupts to drive the search 2460 * for new packets in the RX ring, so we should catch up eventually. 2461 */ 2462 static int 2463 dc_rx_resync(struct dc_softc *sc) 2464 { 2465 int i, pos; 2466 struct dc_desc *cur_rx; 2467 2468 pos = sc->dc_cdata.dc_rx_prod; 2469 2470 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2471 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2472 if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) 2473 break; 2474 DC_INC(pos, DC_RX_LIST_CNT); 2475 } 2476 2477 /* If the ring really is empty, then just return. */ 2478 if (i == DC_RX_LIST_CNT) 2479 return(0); 2480 2481 /* We've fallen behind the chip: catch it.
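 * Advance the producer index to the first descriptor the chip has
 * already handed back so dc_rxeof() can resume there; returning
 * EAGAIN tells the caller to make another dc_rxeof() pass.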
*/ 2482 sc->dc_cdata.dc_rx_prod = pos; 2483 2484 return(EAGAIN); 2485 } 2486 2487 /* 2488 * A frame has been uploaded: pass the resulting mbuf chain up to 2489 * the higher level protocols. 2490 */ 2491 static void 2492 dc_rxeof(struct dc_softc *sc) 2493 { 2494 struct mbuf *m; 2495 struct ifnet *ifp; 2496 struct dc_desc *cur_rx; 2497 int i, total_len = 0; 2498 u_int32_t rxstat; 2499 2500 ifp = &sc->arpcom.ac_if; 2501 i = sc->dc_cdata.dc_rx_prod; 2502 2503 while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { 2504 2505 #ifdef DEVICE_POLLING 2506 if (ifp->if_flags & IFF_POLLING) { 2507 if (sc->rxcycles <= 0) 2508 break; 2509 sc->rxcycles--; 2510 } 2511 #endif /* DEVICE_POLLING */ 2512 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2513 rxstat = cur_rx->dc_status; 2514 m = sc->dc_cdata.dc_rx_chain[i]; 2515 total_len = DC_RXBYTES(rxstat); 2516 2517 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2518 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2519 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2520 sc->dc_pnic_rx_bug_save = i; 2521 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2522 DC_INC(i, DC_RX_LIST_CNT); 2523 continue; 2524 } 2525 dc_pnic_rx_bug_war(sc, i); 2526 rxstat = cur_rx->dc_status; 2527 total_len = DC_RXBYTES(rxstat); 2528 } 2529 } 2530 2531 sc->dc_cdata.dc_rx_chain[i] = NULL; 2532 2533 /* 2534 * If an error occurs, update stats, clear the 2535 * status word and leave the mbuf cluster in place: 2536 * it should simply get re-used next time this descriptor 2537 * comes up in the ring. However, don't report long 2538 * frames as errors since they could be vlans 2539 */ 2540 if ((rxstat & DC_RXSTAT_RXERR)){ 2541 if (!(rxstat & DC_RXSTAT_GIANT) || 2542 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2543 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2544 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2545 ifp->if_ierrors++; 2546 if (rxstat & DC_RXSTAT_COLLSEEN) 2547 ifp->if_collisions++; 2548 dc_newbuf(sc, i, m); 2549 if (rxstat & DC_RXSTAT_CRCERR) { 2550 DC_INC(i, DC_RX_LIST_CNT); 2551 continue; 2552 } else { 2553 dc_init(sc); 2554 return; 2555 } 2556 } 2557 } 2558 2559 /* No errors; receive the packet. */ 2560 total_len -= ETHER_CRC_LEN; 2561 2562 #ifdef __i386__ 2563 /* 2564 * On the x86 we do not have alignment problems, so try to 2565 * allocate a new buffer for the receive ring, and pass up 2566 * the one where the packet is already, saving the expensive 2567 * copy done in m_devget(). 2568 * If we are on an architecture with alignment problems, or 2569 * if the allocation fails, then use m_devget and leave the 2570 * existing buffer in the receive ring. 2571 */ 2572 if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { 2573 m->m_pkthdr.rcvif = ifp; 2574 m->m_pkthdr.len = m->m_len = total_len; 2575 DC_INC(i, DC_RX_LIST_CNT); 2576 } else 2577 #endif 2578 { 2579 struct mbuf *m0; 2580 2581 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 2582 total_len + ETHER_ALIGN, 0, ifp, NULL); 2583 dc_newbuf(sc, i, m); 2584 DC_INC(i, DC_RX_LIST_CNT); 2585 if (m0 == NULL) { 2586 ifp->if_ierrors++; 2587 continue; 2588 } 2589 m_adj(m0, ETHER_ALIGN); 2590 m = m0; 2591 } 2592 2593 ifp->if_ipackets++; 2594 ifp->if_input(ifp, m); 2595 } 2596 2597 sc->dc_cdata.dc_rx_prod = i; 2598 } 2599 2600 /* 2601 * A frame was downloaded to the chip. It's safe for us to clean up 2602 * the list buffers. 
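 * Descriptors that carried the RX filter setup frame, or that are not
 * the last fragment of a packet, carry no mbuf of their own and are
 * simply accounted for and skipped below.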
2603 */ 2604 2605 static void 2606 dc_txeof(struct dc_softc *sc) 2607 { 2608 struct dc_desc *cur_tx = NULL; 2609 struct ifnet *ifp; 2610 int idx; 2611 2612 ifp = &sc->arpcom.ac_if; 2613 2614 /* 2615 * Go through our tx list and free mbufs for those 2616 * frames that have been transmitted. 2617 */ 2618 idx = sc->dc_cdata.dc_tx_cons; 2619 while(idx != sc->dc_cdata.dc_tx_prod) { 2620 u_int32_t txstat; 2621 2622 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2623 txstat = cur_tx->dc_status; 2624 2625 if (txstat & DC_TXSTAT_OWN) 2626 break; 2627 2628 if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || 2629 cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2630 if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2631 /* 2632 * Yes, the PNIC is so brain damaged 2633 * that it will sometimes generate a TX 2634 * underrun error while DMAing the RX 2635 * filter setup frame. If we detect this, 2636 * we have to send the setup frame again, 2637 * or else the filter won't be programmed 2638 * correctly. 2639 */ 2640 if (DC_IS_PNIC(sc)) { 2641 if (txstat & DC_TXSTAT_ERRSUM) 2642 dc_setfilt(sc); 2643 } 2644 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2645 } 2646 sc->dc_cdata.dc_tx_cnt--; 2647 DC_INC(idx, DC_TX_LIST_CNT); 2648 continue; 2649 } 2650 2651 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2652 /* 2653 * XXX: Why does my Xircom taunt me so? 2654 * For some reason Conexant chips like 2655 * setting the CARRLOST flag even when 2656 * the carrier is there. In CURRENT we 2657 * have the same problem for Xircom 2658 * cards ! 2659 */ 2660 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2661 sc->dc_pmode == DC_PMODE_MII && 2662 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2663 DC_TXSTAT_NOCARRIER))) 2664 txstat &= ~DC_TXSTAT_ERRSUM; 2665 } else { 2666 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2667 sc->dc_pmode == DC_PMODE_MII && 2668 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2669 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2670 txstat &= ~DC_TXSTAT_ERRSUM; 2671 } 2672 2673 if (txstat & DC_TXSTAT_ERRSUM) { 2674 ifp->if_oerrors++; 2675 if (txstat & DC_TXSTAT_EXCESSCOLL) 2676 ifp->if_collisions++; 2677 if (txstat & DC_TXSTAT_LATECOLL) 2678 ifp->if_collisions++; 2679 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2680 dc_init(sc); 2681 return; 2682 } 2683 } 2684 2685 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2686 2687 ifp->if_opackets++; 2688 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2689 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2690 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2691 } 2692 2693 sc->dc_cdata.dc_tx_cnt--; 2694 DC_INC(idx, DC_TX_LIST_CNT); 2695 } 2696 2697 if (idx != sc->dc_cdata.dc_tx_cons) { 2698 /* some buffers have been freed */ 2699 sc->dc_cdata.dc_tx_cons = idx; 2700 ifp->if_flags &= ~IFF_OACTIVE; 2701 } 2702 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2703 2704 return; 2705 } 2706 2707 static void 2708 dc_tick(void *xsc) 2709 { 2710 struct dc_softc *sc = xsc; 2711 struct ifnet *ifp = &sc->arpcom.ac_if; 2712 struct mii_data *mii; 2713 u_int32_t r; 2714 2715 lwkt_serialize_enter(ifp->if_serializer); 2716 2717 mii = device_get_softc(sc->dc_miibus); 2718 2719 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2720 if (sc->dc_flags & DC_21143_NWAY) { 2721 r = CSR_READ_4(sc, DC_10BTSTAT); 2722 if (IFM_SUBTYPE(mii->mii_media_active) == 2723 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2724 sc->dc_link = 0; 2725 mii_mediachg(mii); 2726 } 2727 if (IFM_SUBTYPE(mii->mii_media_active) == 2728 IFM_10_T && (r & DC_TSTAT_LS10)) { 2729 sc->dc_link = 0; 2730 mii_mediachg(mii); 2731 } 2732 if (sc->dc_link == 0) 2733 mii_tick(mii); 2734 } else { 2735 r = CSR_READ_4(sc, DC_ISR); 2736 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2737 sc->dc_cdata.dc_tx_cnt == 0) { 2738 mii_tick(mii); 2739 if (!(mii->mii_media_status & IFM_ACTIVE)) 2740 sc->dc_link = 0; 2741 } 2742 } 2743 } else { 2744 mii_tick(mii); 2745 } 2746 2747 /* 2748 * When the init routine completes, we expect to be able to send 2749 * packets right away, and in fact the network code will send a 2750 * gratuitous ARP the moment the init routine marks the interface 2751 * as running. However, even though the MAC may have been initialized, 2752 * there may be a delay of a few seconds before the PHY completes 2753 * autonegotiation and the link is brought up. Any transmissions 2754 * made during that delay will be lost. Dealing with this is tricky: 2755 * we can't just pause in the init routine while waiting for the 2756 * PHY to come ready since that would bring the whole system to 2757 * a screeching halt for several seconds. 2758 * 2759 * What we do here is prevent the TX start routine from sending 2760 * any packets until a link has been established. After the 2761 * interface has been initialized, the tick routine will poll 2762 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2763 * that time, packets will stay in the send queue, and once the 2764 * link comes up, they will be flushed out to the wire. 2765 */ 2766 if (!sc->dc_link) { 2767 mii_pollstat(mii); 2768 if (mii->mii_media_status & IFM_ACTIVE && 2769 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2770 sc->dc_link++; 2771 if (!ifq_is_empty(&ifp->if_snd)) 2772 dc_start(ifp); 2773 } 2774 } 2775 2776 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2777 callout_reset(&sc->dc_stat_timer, hz / 10, dc_tick, sc); 2778 else 2779 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 2780 2781 lwkt_serialize_exit(ifp->if_serializer); 2782 } 2783 2784 /* 2785 * A transmit underrun has occurred. Back off the transmit threshold, 2786 * or switch to store and forward mode if we have to. 2787 */ 2788 static void 2789 dc_tx_underrun(struct dc_softc *sc) 2790 { 2791 u_int32_t isr; 2792 int i; 2793 2794 if (DC_IS_DAVICOM(sc)) 2795 dc_init(sc); 2796 2797 if (DC_IS_INTEL(sc)) { 2798 /* 2799 * The real 21143 requires that the transmitter be idle 2800 * in order to change the transmit threshold or store 2801 * and forward state. 
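 * So on the Intel parts the code below turns the transmitter off,
 * polls the ISR for DC_ISR_TX_IDLE (reinitializing the chip if it
 * never goes idle within DC_TIMEOUT attempts), applies the new
 * threshold or store-and-forward setting, and then turns the
 * transmitter back on.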
2802 */ 2803 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2804 2805 for (i = 0; i < DC_TIMEOUT; i++) { 2806 isr = CSR_READ_4(sc, DC_ISR); 2807 if (isr & DC_ISR_TX_IDLE) 2808 break; 2809 DELAY(10); 2810 } 2811 if (i == DC_TIMEOUT) { 2812 if_printf(&sc->arpcom.ac_if, 2813 "failed to force tx to idle state\n"); 2814 dc_init(sc); 2815 } 2816 } 2817 2818 if_printf(&sc->arpcom.ac_if, "TX underrun -- "); 2819 sc->dc_txthresh += DC_TXTHRESH_INC; 2820 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2821 printf("using store and forward mode\n"); 2822 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2823 } else { 2824 printf("increasing TX threshold\n"); 2825 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2826 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2827 } 2828 2829 if (DC_IS_INTEL(sc)) 2830 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2831 2832 return; 2833 } 2834 2835 #ifdef DEVICE_POLLING 2836 2837 static void 2838 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2839 { 2840 struct dc_softc *sc = ifp->if_softc; 2841 u_int32_t status; 2842 2843 switch(cmd) { 2844 case POLL_REGISTER: 2845 /* Disable interrupts */ 2846 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2847 break; 2848 case POLL_DEREGISTER: 2849 /* Re-enable interrupts. */ 2850 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2851 break; 2852 case POLL_ONLY: 2853 sc->rxcycles = count; 2854 dc_rxeof(sc); 2855 dc_txeof(sc); 2856 if ((ifp->if_flags & IFF_OACTIVE) == 0 && !ifq_is_empty(&ifp->if_snd)) 2857 dc_start(ifp); 2858 break; 2859 case POLL_AND_CHECK_STATUS: 2860 sc->rxcycles = count; 2861 dc_rxeof(sc); 2862 dc_txeof(sc); 2863 if ((ifp->if_flags & IFF_OACTIVE) == 0 && !ifq_is_empty(&ifp->if_snd)) 2864 dc_start(ifp); 2865 status = CSR_READ_4(sc, DC_ISR); 2866 status &= (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF| 2867 DC_ISR_TX_NOBUF|DC_ISR_TX_IDLE|DC_ISR_TX_UNDERRUN| 2868 DC_ISR_BUS_ERR); 2869 if (!status) 2870 break; 2871 /* ack what we have */ 2872 CSR_WRITE_4(sc, DC_ISR, status); 2873 2874 if (status & (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF) ) { 2875 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 2876 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 2877 2878 if (dc_rx_resync(sc)) 2879 dc_rxeof(sc); 2880 } 2881 /* restart transmit unit if necessary */ 2882 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 2883 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2884 2885 if (status & DC_ISR_TX_UNDERRUN) 2886 dc_tx_underrun(sc); 2887 2888 if (status & DC_ISR_BUS_ERR) { 2889 if_printf(ifp, "dc_poll: bus error\n"); 2890 dc_reset(sc); 2891 dc_init(sc); 2892 } 2893 break; 2894 } 2895 } 2896 #endif /* DEVICE_POLLING */ 2897 2898 static void 2899 dc_intr(void *arg) 2900 { 2901 struct dc_softc *sc; 2902 struct ifnet *ifp; 2903 u_int32_t status; 2904 2905 sc = arg; 2906 2907 if (sc->suspended) { 2908 return; 2909 } 2910 2911 ifp = &sc->arpcom.ac_if; 2912 2913 if ( (CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 2914 return ; 2915 2916 /* Suppress unwanted interrupts */ 2917 if (!(ifp->if_flags & IFF_UP)) { 2918 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 2919 dc_stop(sc); 2920 return; 2921 } 2922 2923 /* Disable interrupts. 
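 * They stay masked while the loop below services and acknowledges
 * each pending condition, and are re-enabled once the ISR drains
 * (a reading of 0xFFFFFFFF is taken to mean the hardware is gone).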
*/ 2924 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 2925 2926 while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 2927 status != 0xFFFFFFFF) { 2928 2929 CSR_WRITE_4(sc, DC_ISR, status); 2930 2931 if (status & DC_ISR_RX_OK) { 2932 int curpkts; 2933 curpkts = ifp->if_ipackets; 2934 dc_rxeof(sc); 2935 if (curpkts == ifp->if_ipackets) { 2936 while(dc_rx_resync(sc)) 2937 dc_rxeof(sc); 2938 } 2939 } 2940 2941 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 2942 dc_txeof(sc); 2943 2944 if (status & DC_ISR_TX_IDLE) { 2945 dc_txeof(sc); 2946 if (sc->dc_cdata.dc_tx_cnt) { 2947 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2948 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2949 } 2950 } 2951 2952 if (status & DC_ISR_TX_UNDERRUN) 2953 dc_tx_underrun(sc); 2954 2955 if ((status & DC_ISR_RX_WATDOGTIMEO) 2956 || (status & DC_ISR_RX_NOBUF)) { 2957 int curpkts; 2958 curpkts = ifp->if_ipackets; 2959 dc_rxeof(sc); 2960 if (curpkts == ifp->if_ipackets) { 2961 while(dc_rx_resync(sc)) 2962 dc_rxeof(sc); 2963 } 2964 } 2965 2966 if (status & DC_ISR_BUS_ERR) { 2967 dc_reset(sc); 2968 dc_init(sc); 2969 } 2970 } 2971 2972 /* Re-enable interrupts. */ 2973 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2974 2975 if (!ifq_is_empty(&ifp->if_snd)) 2976 dc_start(ifp); 2977 2978 return; 2979 } 2980 2981 /* 2982 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2983 * pointers to the fragment pointers. 2984 */ 2985 static int 2986 dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 2987 { 2988 struct dc_desc *f = NULL; 2989 struct mbuf *m; 2990 int frag, cur, cnt = 0; 2991 2992 /* 2993 * Start packing the mbufs in this chain into 2994 * the fragment pointers. Stop when we run out 2995 * of fragments or hit the end of the mbuf chain. 2996 */ 2997 m = m_head; 2998 cur = frag = *txidx; 2999 3000 for (m = m_head; m != NULL; m = m->m_next) { 3001 if (m->m_len != 0) { 3002 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 3003 if (*txidx != sc->dc_cdata.dc_tx_prod && 3004 frag == (DC_TX_LIST_CNT - 1)) 3005 return(ENOBUFS); 3006 } 3007 if ((DC_TX_LIST_CNT - 3008 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) 3009 return(ENOBUFS); 3010 3011 f = &sc->dc_ldata->dc_tx_list[frag]; 3012 f->dc_ctl = DC_TXCTL_TLINK | m->m_len; 3013 if (cnt == 0) { 3014 f->dc_status = 0; 3015 f->dc_ctl |= DC_TXCTL_FIRSTFRAG; 3016 } else 3017 f->dc_status = DC_TXSTAT_OWN; 3018 f->dc_data = vtophys(mtod(m, vm_offset_t)); 3019 cur = frag; 3020 DC_INC(frag, DC_TX_LIST_CNT); 3021 cnt++; 3022 } 3023 } 3024 3025 if (m != NULL) 3026 return(ENOBUFS); 3027 3028 sc->dc_cdata.dc_tx_cnt += cnt; 3029 sc->dc_cdata.dc_tx_chain[cur] = m_head; 3030 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; 3031 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3032 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; 3033 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3034 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3035 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3036 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3037 sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; 3038 *txidx = frag; 3039 3040 return(0); 3041 } 3042 3043 /* 3044 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3045 * to the mbuf data regions directly in the transmit lists. We also save a 3046 * copy of the pointers since the transmit list fragment pointers are 3047 * physical addresses. 
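 * A packet is only dequeued from the send queue once dc_encap() has
 * accepted it, and on chips that need coalescing (DC_TX_COALESCE /
 * DC_TX_ALIGN) the mbuf chain is first defragmented into a single,
 * properly aligned buffer.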
3048 */ 3049 3050 static void 3051 dc_start(struct ifnet *ifp) 3052 { 3053 struct dc_softc *sc; 3054 struct mbuf *m_head; 3055 struct mbuf *m_defragged; 3056 int idx, need_trans; 3057 3058 sc = ifp->if_softc; 3059 3060 if (!sc->dc_link) 3061 return; 3062 3063 if (ifp->if_flags & IFF_OACTIVE) 3064 return; 3065 3066 idx = sc->dc_cdata.dc_tx_prod; 3067 3068 need_trans = 0; 3069 while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3070 m_defragged = NULL; 3071 m_head = ifq_poll(&ifp->if_snd); 3072 if (m_head == NULL) 3073 break; 3074 3075 if (sc->dc_flags & DC_TX_COALESCE && 3076 (m_head->m_next != NULL || 3077 sc->dc_flags & DC_TX_ALIGN)){ 3078 /* 3079 * Check first if coalescing allows us to queue 3080 * the packet. We don't want to loose it if 3081 * the TX queue is full. 3082 */ 3083 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 3084 idx != sc->dc_cdata.dc_tx_prod && 3085 idx == (DC_TX_LIST_CNT - 1)) { 3086 ifp->if_flags |= IFF_OACTIVE; 3087 break; 3088 } 3089 if ((DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt) < 5) { 3090 ifp->if_flags |= IFF_OACTIVE; 3091 break; 3092 } 3093 3094 /* only coalesce if have >1 mbufs */ 3095 m_defragged = m_defrag_nofree(m_head, MB_DONTWAIT); 3096 if (m_defragged == NULL) { 3097 ifp->if_flags |= IFF_OACTIVE; 3098 break; 3099 } 3100 } 3101 3102 if (dc_encap(sc, (m_defragged ? m_defragged : m_head), &idx)) { 3103 if (m_defragged) { 3104 /* 3105 * Throw away the original packet if the 3106 * defragged packet could not be encapsulated, 3107 * as well as the defragged packet. 3108 */ 3109 ifq_dequeue(&ifp->if_snd, m_head); 3110 m_freem(m_head); 3111 m_freem(m_defragged); 3112 } 3113 ifp->if_flags |= IFF_OACTIVE; 3114 break; 3115 } 3116 3117 ifq_dequeue(&ifp->if_snd, m_head); 3118 3119 need_trans = 1; 3120 3121 /* 3122 * If there's a BPF listener, bounce a copy of this frame 3123 * to him. 3124 */ 3125 BPF_MTAP(ifp, (m_defragged ? m_defragged : m_head)); 3126 3127 /* 3128 * If we defragged the packet, m_head is not the one we 3129 * encapsulated so we can throw it away. 3130 */ 3131 if (m_defragged) 3132 m_freem(m_head); 3133 3134 if (sc->dc_flags & DC_TX_ONE) { 3135 ifp->if_flags |= IFF_OACTIVE; 3136 break; 3137 } 3138 } 3139 3140 if (!need_trans) 3141 return; 3142 3143 /* Transmit */ 3144 sc->dc_cdata.dc_tx_prod = idx; 3145 if (!(sc->dc_flags & DC_TX_POLL)) 3146 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3147 3148 /* 3149 * Set a timeout in case the chip goes out to lunch. 3150 */ 3151 ifp->if_timer = 5; 3152 } 3153 3154 static void 3155 dc_init(void *xsc) 3156 { 3157 struct dc_softc *sc = xsc; 3158 struct ifnet *ifp = &sc->arpcom.ac_if; 3159 struct mii_data *mii; 3160 3161 mii = device_get_softc(sc->dc_miibus); 3162 3163 /* 3164 * Cancel pending I/O and free all RX/TX buffers. 3165 */ 3166 dc_stop(sc); 3167 dc_reset(sc); 3168 3169 /* 3170 * Set cache alignment and burst length. 3171 */ 3172 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3173 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3174 else 3175 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 3176 /* 3177 * Evenly share the bus between receive and transmit process. 
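 * On the 21143 this is done by setting the arbitration bit in the bus
 * control register before the burst length and cache alignment are
 * programmed below.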
3178 */ 3179 if (DC_IS_INTEL(sc)) 3180 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3181 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3182 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3183 } else { 3184 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3185 } 3186 if (sc->dc_flags & DC_TX_POLL) 3187 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3188 switch(sc->dc_cachesize) { 3189 case 32: 3190 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3191 break; 3192 case 16: 3193 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3194 break; 3195 case 8: 3196 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3197 break; 3198 case 0: 3199 default: 3200 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3201 break; 3202 } 3203 3204 if (sc->dc_flags & DC_TX_STORENFWD) 3205 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3206 else { 3207 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3208 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3209 } else { 3210 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3211 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3212 } 3213 } 3214 3215 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3216 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3217 3218 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3219 /* 3220 * The app notes for the 98713 and 98715A say that 3221 * in order to have the chips operate properly, a magic 3222 * number must be written to CSR16. Macronix does not 3223 * document the meaning of these bits so there's no way 3224 * to know exactly what they do. The 98713 has a magic 3225 * number all its own; the rest all use a different one. 3226 */ 3227 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3228 if (sc->dc_type == DC_TYPE_98713) 3229 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3230 else 3231 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3232 } 3233 3234 if (DC_IS_XIRCOM(sc)) { 3235 /* 3236 * Setup General Purpose Port mode and data so the tulip 3237 * can talk to the MII. 3238 */ 3239 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3240 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3241 DELAY(10); 3242 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3243 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3244 DELAY(10); 3245 } 3246 3247 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3248 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3249 3250 /* Init circular RX list. */ 3251 if (dc_list_rx_init(sc) == ENOBUFS) { 3252 if_printf(ifp, "initialization failed: no " 3253 "memory for rx buffers\n"); 3254 dc_stop(sc); 3255 return; 3256 } 3257 3258 /* 3259 * Init tx descriptors. 3260 */ 3261 dc_list_tx_init(sc); 3262 3263 /* 3264 * Load the address of the RX list. 3265 */ 3266 CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); 3267 CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); 3268 3269 /* 3270 * Enable interrupts. 3271 */ 3272 #ifdef DEVICE_POLLING 3273 /* 3274 * ... but only if we are not polling, and make sure they are off in 3275 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3276 * after a reset. 3277 */ 3278 if (ifp->if_flags & IFF_POLLING) 3279 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3280 else 3281 #endif 3282 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3283 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3284 3285 /* Enable transmitter. */ 3286 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3287 3288 /* 3289 * If this is an Intel 21143 and we're not using the 3290 * MII port, program the LED control pins so we get 3291 * link and activity indications. 
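 * DC_TULIP_LEDS is only set at attach time when no MII PHY was found
 * and the internal SIA/SYM media (dcphy) is used, so MII-based boards
 * are left alone here.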
3292 */ 3293 if (sc->dc_flags & DC_TULIP_LEDS) { 3294 CSR_WRITE_4(sc, DC_WATCHDOG, 3295 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 3296 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3297 } 3298 3299 /* 3300 * Load the RX/multicast filter. We do this sort of late 3301 * because the filter programming scheme on the 21143 and 3302 * some clones requires DMAing a setup frame via the TX 3303 * engine, and we need the transmitter enabled for that. 3304 */ 3305 dc_setfilt(sc); 3306 3307 /* Enable receiver. */ 3308 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3309 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3310 3311 mii_mediachg(mii); 3312 dc_setcfg(sc, sc->dc_if_media); 3313 3314 ifp->if_flags |= IFF_RUNNING; 3315 ifp->if_flags &= ~IFF_OACTIVE; 3316 3317 /* Don't start the ticker if this is a homePNA link. */ 3318 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3319 sc->dc_link = 1; 3320 else { 3321 if (sc->dc_flags & DC_21143_NWAY) 3322 callout_reset(&sc->dc_stat_timer, hz/10, dc_tick, sc); 3323 else 3324 callout_reset(&sc->dc_stat_timer, hz, dc_tick, sc); 3325 } 3326 3327 return; 3328 } 3329 3330 /* 3331 * Set media options. 3332 */ 3333 static int 3334 dc_ifmedia_upd(struct ifnet *ifp) 3335 { 3336 struct dc_softc *sc; 3337 struct mii_data *mii; 3338 struct ifmedia *ifm; 3339 3340 sc = ifp->if_softc; 3341 mii = device_get_softc(sc->dc_miibus); 3342 mii_mediachg(mii); 3343 ifm = &mii->mii_media; 3344 3345 if (DC_IS_DAVICOM(sc) && 3346 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3347 dc_setcfg(sc, ifm->ifm_media); 3348 else 3349 sc->dc_link = 0; 3350 3351 return(0); 3352 } 3353 3354 /* 3355 * Report current media status. 3356 */ 3357 static void 3358 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3359 { 3360 struct dc_softc *sc; 3361 struct mii_data *mii; 3362 struct ifmedia *ifm; 3363 3364 sc = ifp->if_softc; 3365 mii = device_get_softc(sc->dc_miibus); 3366 mii_pollstat(mii); 3367 ifm = &mii->mii_media; 3368 if (DC_IS_DAVICOM(sc)) { 3369 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3370 ifmr->ifm_active = ifm->ifm_media; 3371 ifmr->ifm_status = 0; 3372 return; 3373 } 3374 } 3375 ifmr->ifm_active = mii->mii_media_active; 3376 ifmr->ifm_status = mii->mii_media_status; 3377 3378 return; 3379 } 3380 3381 static int 3382 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3383 { 3384 struct dc_softc *sc = ifp->if_softc; 3385 struct ifreq *ifr = (struct ifreq *) data; 3386 struct mii_data *mii; 3387 int error = 0; 3388 3389 switch(command) { 3390 case SIOCSIFFLAGS: 3391 if (ifp->if_flags & IFF_UP) { 3392 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3393 (IFF_PROMISC | IFF_ALLMULTI); 3394 if (ifp->if_flags & IFF_RUNNING) { 3395 if (need_setfilt) 3396 dc_setfilt(sc); 3397 } else { 3398 sc->dc_txthresh = 0; 3399 dc_init(sc); 3400 } 3401 } else { 3402 if (ifp->if_flags & IFF_RUNNING) 3403 dc_stop(sc); 3404 } 3405 sc->dc_if_flags = ifp->if_flags; 3406 error = 0; 3407 break; 3408 case SIOCADDMULTI: 3409 case SIOCDELMULTI: 3410 dc_setfilt(sc); 3411 error = 0; 3412 break; 3413 case SIOCGIFMEDIA: 3414 case SIOCSIFMEDIA: 3415 mii = device_get_softc(sc->dc_miibus); 3416 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3417 break; 3418 default: 3419 error = ether_ioctl(ifp, command, data); 3420 break; 3421 } 3422 3423 return(error); 3424 } 3425 3426 static void 3427 dc_watchdog(struct ifnet *ifp) 3428 { 3429 struct dc_softc *sc; 3430 3431 sc = ifp->if_softc; 3432 3433 ifp->if_oerrors++; 3434 if_printf(ifp, "watchdog timeout\n"); 3435 3436 dc_stop(sc); 3437 
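	/* Reset and reinitialize the chip, then restart anything still queued. */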
dc_reset(sc); 3438 dc_init(sc); 3439 3440 if (!ifq_is_empty(&ifp->if_snd)) 3441 dc_start(ifp); 3442 3443 return; 3444 } 3445 3446 /* 3447 * Stop the adapter and free any mbufs allocated to the 3448 * RX and TX lists. 3449 */ 3450 static void 3451 dc_stop(struct dc_softc *sc) 3452 { 3453 int i; 3454 struct ifnet *ifp; 3455 3456 ifp = &sc->arpcom.ac_if; 3457 ifp->if_timer = 0; 3458 3459 callout_stop(&sc->dc_stat_timer); 3460 3461 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3462 3463 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3464 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3465 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3466 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3467 sc->dc_link = 0; 3468 3469 /* 3470 * Free data in the RX lists. 3471 */ 3472 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3473 if (sc->dc_cdata.dc_rx_chain[i] != NULL) { 3474 m_freem(sc->dc_cdata.dc_rx_chain[i]); 3475 sc->dc_cdata.dc_rx_chain[i] = NULL; 3476 } 3477 } 3478 bzero((char *)&sc->dc_ldata->dc_rx_list, 3479 sizeof(sc->dc_ldata->dc_rx_list)); 3480 3481 /* 3482 * Free the TX list buffers. 3483 */ 3484 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3485 if (sc->dc_cdata.dc_tx_chain[i] != NULL) { 3486 if ((sc->dc_ldata->dc_tx_list[i].dc_ctl & 3487 DC_TXCTL_SETUP) || 3488 !(sc->dc_ldata->dc_tx_list[i].dc_ctl & 3489 DC_TXCTL_LASTFRAG)) { 3490 sc->dc_cdata.dc_tx_chain[i] = NULL; 3491 continue; 3492 } 3493 m_freem(sc->dc_cdata.dc_tx_chain[i]); 3494 sc->dc_cdata.dc_tx_chain[i] = NULL; 3495 } 3496 } 3497 3498 bzero((char *)&sc->dc_ldata->dc_tx_list, 3499 sizeof(sc->dc_ldata->dc_tx_list)); 3500 3501 return; 3502 } 3503 3504 /* 3505 * Stop all chip I/O so that the kernel's probe routines don't 3506 * get confused by errant DMAs when rebooting. 3507 */ 3508 static void 3509 dc_shutdown(device_t dev) 3510 { 3511 struct dc_softc *sc; 3512 struct ifnet *ifp; 3513 3514 sc = device_get_softc(dev); 3515 ifp = &sc->arpcom.ac_if; 3516 lwkt_serialize_enter(ifp->if_serializer); 3517 3518 dc_stop(sc); 3519 3520 lwkt_serialize_exit(ifp->if_serializer); 3521 } 3522 3523 /* 3524 * Device suspend routine. Stop the interface and save some PCI 3525 * settings in case the BIOS doesn't restore them properly on 3526 * resume. 3527 */ 3528 static int 3529 dc_suspend(device_t dev) 3530 { 3531 struct dc_softc *sc = device_get_softc(dev); 3532 struct ifnet *ifp = &sc->arpcom.ac_if; 3533 int i; 3534 lwkt_serialize_enter(ifp->if_serializer); 3535 3536 dc_stop(sc); 3537 for (i = 0; i < 5; i++) 3538 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3539 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3540 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3541 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3542 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3543 3544 sc->suspended = 1; 3545 3546 lwkt_serialize_exit(ifp->if_serializer); 3547 return (0); 3548 } 3549 3550 /* 3551 * Device resume routine. Restore some PCI settings in case the BIOS 3552 * doesn't, re-enable busmastering, and restart the interface if 3553 * appropriate. 3554 */ 3555 static int 3556 dc_resume(device_t dev) 3557 { 3558 struct dc_softc *sc = device_get_softc(dev); 3559 struct ifnet *ifp = &sc->arpcom.ac_if; 3560 int i; 3561 3562 lwkt_serialize_enter(ifp->if_serializer); 3563 dc_acpi(dev); 3564 3565 /* better way to do this? 
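 * For now, write the saved BARs, BIOS ROM address, interrupt line,
 * cache line size and latency timer back one register at a time.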
*/ 3566 for (i = 0; i < 5; i++) 3567 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3568 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3569 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3570 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3571 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3572 3573 /* reenable busmastering */ 3574 pci_enable_busmaster(dev); 3575 pci_enable_io(dev, DC_RES); 3576 3577 /* reinitialize interface if necessary */ 3578 if (ifp->if_flags & IFF_UP) 3579 dc_init(sc); 3580 3581 sc->suspended = 0; 3582 lwkt_serialize_exit(ifp->if_serializer); 3583 3584 return (0); 3585 } 3586 3587 static uint32_t 3588 dc_mchash_xircom(struct dc_softc *sc, const uint8_t *addr) 3589 { 3590 uint32_t crc; 3591 3592 /* Compute CRC for the address value. */ 3593 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 3594 3595 if ((crc & 0x180) == 0x180) 3596 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); 3597 else 3598 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + (12 << 4)); 3599 } 3600