1 /* 2 * Copyright (c) 2004 3 * Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 4 * 5 * Copyright (c) 1997, 1998-2003 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $ 36 * $DragonFly: src/sys/dev/netif/re/if_re.c,v 1.44 2008/06/25 11:02:33 sephe Exp $ 37 */ 38 39 /* 40 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 41 * 42 * Written by Bill Paul <wpaul@windriver.com> 43 * Senior Networking Software Engineer 44 * Wind River Systems 45 */ 46 47 /* 48 * This driver is designed to support RealTek's next generation of 49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 50 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 51 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 52 * 53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 54 * with the older 8139 family, however it also supports a special 55 * C+ mode of operation that provides several new performance enhancing 56 * features. These include: 57 * 58 * o Descriptor based DMA mechanism. Each descriptor represents 59 * a single packet fragment. Data buffers may be aligned on 60 * any byte boundary. 61 * 62 * o 64-bit DMA 63 * 64 * o TCP/IP checksum offload for both RX and TX 65 * 66 * o High and normal priority transmit DMA rings 67 * 68 * o VLAN tag insertion and extraction 69 * 70 * o TCP large send (segmentation offload) 71 * 72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 73 * programming API is fairly straightforward. The RX filtering, EEPROM 74 * access and PHY access is the same as it is on the older 8139 series 75 * chips. 76 * 77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 78 * same programming API and feature set as the 8139C+ with the following 79 * differences and additions: 80 * 81 * o 1000Mbps mode 82 * 83 * o Jumbo frames 84 * 85 * o GMII and TBI ports/registers for interfacing with copper 86 * or fiber PHYs 87 * 88 * o RX and TX DMA rings can have up to 1024 descriptors 89 * (the 8139C+ allows a maximum of 64) 90 * 91 * o Slight differences in register layout from the 8139C+ 92 * 93 * The TX start and timer interrupt registers are at different locations 94 * on the 8169 than they are on the 8139C+. Also, the status word in the 95 * RX descriptor has a slightly different bit layout. The 8169 does not 96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 97 * copper gigE PHY. 98 * 99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 100 * (the 'S' stands for 'single-chip'). These devices have the same 101 * programming API as the older 8169, but also have some vendor-specific 102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 104 * 105 * This driver takes advantage of the RX and TX checksum offload and 106 * VLAN tag insertion/extraction features. It also implements TX 107 * interrupt moderation using the timer interrupt registers, which 108 * significantly reduces TX interrupt load. There is also support 109 * for jumbo frames, however the 8169/8169S/8110S can not transmit 110 * jumbo frames larger than 7440, so the max MTU possible with this 111 * driver is 7422 bytes. 112 */ 113 114 #include "opt_polling.h" 115 #include "opt_ethernet.h" 116 117 #include <sys/param.h> 118 #include <sys/bus.h> 119 #include <sys/endian.h> 120 #include <sys/kernel.h> 121 #include <sys/interrupt.h> 122 #include <sys/malloc.h> 123 #include <sys/mbuf.h> 124 #include <sys/rman.h> 125 #include <sys/serialize.h> 126 #include <sys/socket.h> 127 #include <sys/sockio.h> 128 #include <sys/sysctl.h> 129 130 #include <net/bpf.h> 131 #include <net/ethernet.h> 132 #include <net/if.h> 133 #include <net/ifq_var.h> 134 #include <net/if_arp.h> 135 #include <net/if_dl.h> 136 #include <net/if_media.h> 137 #include <net/if_types.h> 138 #include <net/vlan/if_vlan_var.h> 139 #include <net/vlan/if_vlan_ether.h> 140 141 #include <dev/netif/mii_layer/mii.h> 142 #include <dev/netif/mii_layer/miivar.h> 143 144 #include <bus/pci/pcidevs.h> 145 #include <bus/pci/pcireg.h> 146 #include <bus/pci/pcivar.h> 147 148 /* "device miibus" required. See GENERIC if you get errors here. */ 149 #include "miibus_if.h" 150 151 #include <dev/netif/re/if_rereg.h> 152 #include <dev/netif/re/if_revar.h> 153 154 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 155 #if 0 156 #define RE_DISABLE_HWCSUM 157 #endif 158 159 /* 160 * Various supported device vendors/types and their names. 161 */ 162 static const struct re_type re_devs[] = { 163 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T, RE_HWREV_8169S, 164 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 165 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8139, RE_HWREV_8139CPLUS, 166 "RealTek 8139C+ 10/100BaseTX" }, 167 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E, RE_HWREV_8101E, 168 "RealTek 8101E PCIe 10/100baseTX" }, 169 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, RE_HWREV_8168_SPIN1, 170 "RealTek 8168/8111B PCIe Gigabit Ethernet" }, 171 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, RE_HWREV_8168_SPIN2, 172 "RealTek 8168/8111B PCIe Gigabit Ethernet" }, 173 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, RE_HWREV_8168_SPIN3, 174 "RealTek 8168B/8111B PCIe Gigabit Ethernet" }, 175 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, RE_HWREV_8168C, 176 "RealTek 8168C/8111C PCIe Gigabit Ethernet" }, 177 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169, 178 "RealTek 8169 Gigabit Ethernet" }, 179 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169S, 180 "RealTek 8169S Single-chip Gigabit Ethernet" }, 181 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169_8110SB, 182 "RealTek 8169SB/8110SB Single-chip Gigabit Ethernet" }, 183 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169_8110SC, 184 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 185 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC, RE_HWREV_8169_8110SC, 186 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 187 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8110S, 188 "RealTek 8110S Single-chip Gigabit Ethernet" }, 189 { PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT, RE_HWREV_8169S, 190 "Corega CG-LAPCIGT Gigabit Ethernet" }, 191 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032, RE_HWREV_8169S, 192 "Linksys EG1032 Gigabit Ethernet" }, 193 { PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902, RE_HWREV_8169S, 194 "US Robotics 997902 Gigabit Ethernet" }, 195 { 0, 0, 0, NULL } 196 }; 197 198 static const struct re_hwrev re_hwrevs[] = { 199 { RE_HWREV_8139CPLUS, RE_8139CPLUS, RE_F_HASMPC, "C+" }, 200 { RE_HWREV_8168_SPIN1, RE_8169, RE_F_PCIE, "8168" }, 201 { RE_HWREV_8168_SPIN2, RE_8169, RE_F_PCIE, "8168" }, 202 { RE_HWREV_8168_SPIN3, RE_8169, RE_F_PCIE, "8168" }, 203 { RE_HWREV_8168C, RE_8169, RE_F_PCIE, "8168C" }, 204 { RE_HWREV_8169, RE_8169, RE_F_HASMPC, "8169" }, 205 { RE_HWREV_8169S, RE_8169, RE_F_HASMPC, "8169S" }, 206 { RE_HWREV_8110S, RE_8169, RE_F_HASMPC, "8110S" }, 207 { RE_HWREV_8169_8110SB, RE_8169, RE_F_HASMPC, "8169SB" }, 208 { RE_HWREV_8169_8110SC, RE_8169, 0, "8169SC" }, 209 { RE_HWREV_8100E, RE_8169, RE_F_HASMPC, "8100E" }, 210 { RE_HWREV_8101E, RE_8169, RE_F_PCIE, "8101E" }, 211 { 0, 0, 0, NULL } 212 }; 213 214 static int re_probe(device_t); 215 static int re_attach(device_t); 216 static int re_detach(device_t); 217 218 static int re_encap(struct re_softc *, struct mbuf **, int *, int *); 219 220 static void re_dma_map_addr(void *, bus_dma_segment_t *, int, int); 221 static void re_dma_map_desc(void *, bus_dma_segment_t *, int, 222 bus_size_t, int); 223 static int re_allocmem(device_t, struct re_softc *); 224 static int re_newbuf(struct re_softc *, int, struct mbuf *); 225 static int re_rx_list_init(struct re_softc *); 226 static int re_tx_list_init(struct re_softc *); 227 static void re_rxeof(struct re_softc *); 228 static void re_txeof(struct re_softc *); 229 static void re_intr(void *); 230 static void re_tick(void *); 231 static void re_tick_serialized(void *); 232 static void re_start(struct ifnet *); 233 static int re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 234 static void re_init(void *); 235 static void re_stop(struct re_softc *); 236 static void re_watchdog(struct ifnet *); 237 static int re_suspend(device_t); 238 static int re_resume(device_t); 239 static void re_shutdown(device_t); 240 static int re_ifmedia_upd(struct ifnet *); 241 static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 242 243 static void re_eeprom_putbyte(struct re_softc *, int); 244 static void re_eeprom_getword(struct re_softc *, int, u_int16_t *); 245 static void re_read_eeprom(struct re_softc *, caddr_t, int, int); 246 static int re_gmii_readreg(device_t, int, int); 247 static int re_gmii_writereg(device_t, int, int, int); 248 249 static int re_miibus_readreg(device_t, int, int); 250 static int re_miibus_writereg(device_t, int, int, int); 251 static void re_miibus_statchg(device_t); 252 253 static void re_setmulti(struct re_softc *); 254 static void re_reset(struct re_softc *); 255 256 #ifdef RE_DIAG 257 static int re_diag(struct re_softc *); 258 #endif 259 260 #ifdef DEVICE_POLLING 261 static void re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 262 #endif 263 264 static int re_sysctl_tx_moderation(SYSCTL_HANDLER_ARGS); 265 266 static device_method_t re_methods[] = { 267 /* Device interface */ 268 DEVMETHOD(device_probe, re_probe), 269 DEVMETHOD(device_attach, re_attach), 270 DEVMETHOD(device_detach, re_detach), 271 DEVMETHOD(device_suspend, re_suspend), 272 DEVMETHOD(device_resume, re_resume), 273 DEVMETHOD(device_shutdown, re_shutdown), 274 275 /* bus interface */ 276 DEVMETHOD(bus_print_child, bus_generic_print_child), 277 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 278 279 /* MII interface */ 280 DEVMETHOD(miibus_readreg, re_miibus_readreg), 281 DEVMETHOD(miibus_writereg, re_miibus_writereg), 282 DEVMETHOD(miibus_statchg, re_miibus_statchg), 283 284 { 0, 0 } 285 }; 286 287 static driver_t re_driver = { 288 "re", 289 re_methods, 290 sizeof(struct re_softc) 291 }; 292 293 static devclass_t re_devclass; 294 295 DECLARE_DUMMY_MODULE(if_re); 296 DRIVER_MODULE(if_re, pci, re_driver, re_devclass, 0, 0); 297 DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, 0, 0); 298 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 299 300 #define EE_SET(x) \ 301 CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) | (x)) 302 303 #define EE_CLR(x) \ 304 CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) & ~(x)) 305 306 /* 307 * Send a read command and address to the EEPROM, check for ACK. 308 */ 309 static void 310 re_eeprom_putbyte(struct re_softc *sc, int addr) 311 { 312 int d, i; 313 314 d = addr | (RE_9346_READ << sc->re_eewidth); 315 316 /* 317 * Feed in each bit and strobe the clock. 318 */ 319 for (i = 1 << (sc->re_eewidth + 3); i; i >>= 1) { 320 if (d & i) 321 EE_SET(RE_EE_DATAIN); 322 else 323 EE_CLR(RE_EE_DATAIN); 324 DELAY(100); 325 EE_SET(RE_EE_CLK); 326 DELAY(150); 327 EE_CLR(RE_EE_CLK); 328 DELAY(100); 329 } 330 } 331 332 /* 333 * Read a word of data stored in the EEPROM at address 'addr.' 334 */ 335 static void 336 re_eeprom_getword(struct re_softc *sc, int addr, uint16_t *dest) 337 { 338 int i; 339 uint16_t word = 0; 340 341 /* 342 * Send address of word we want to read. 343 */ 344 re_eeprom_putbyte(sc, addr); 345 346 /* 347 * Start reading bits from EEPROM. 348 */ 349 for (i = 0x8000; i != 0; i >>= 1) { 350 EE_SET(RE_EE_CLK); 351 DELAY(100); 352 if (CSR_READ_1(sc, RE_EECMD) & RE_EE_DATAOUT) 353 word |= i; 354 EE_CLR(RE_EE_CLK); 355 DELAY(100); 356 } 357 358 *dest = word; 359 } 360 361 /* 362 * Read a sequence of words from the EEPROM. 363 */ 364 static void 365 re_read_eeprom(struct re_softc *sc, caddr_t dest, int off, int cnt) 366 { 367 int i; 368 uint16_t word = 0, *ptr; 369 370 CSR_SETBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM); 371 DELAY(100); 372 373 for (i = 0; i < cnt; i++) { 374 CSR_SETBIT_1(sc, RE_EECMD, RE_EE_SEL); 375 re_eeprom_getword(sc, off + i, &word); 376 CSR_CLRBIT_1(sc, RE_EECMD, RE_EE_SEL); 377 ptr = (uint16_t *)(dest + (i * 2)); 378 *ptr = word; 379 } 380 381 CSR_CLRBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM); 382 } 383 384 static int 385 re_gmii_readreg(device_t dev, int phy, int reg) 386 { 387 struct re_softc *sc = device_get_softc(dev); 388 u_int32_t rval; 389 int i; 390 391 if (phy != 1) 392 return(0); 393 394 /* Let the rgephy driver read the GMEDIASTAT register */ 395 396 if (reg == RE_GMEDIASTAT) 397 return(CSR_READ_1(sc, RE_GMEDIASTAT)); 398 399 CSR_WRITE_4(sc, RE_PHYAR, reg << 16); 400 DELAY(1000); 401 402 for (i = 0; i < RE_TIMEOUT; i++) { 403 rval = CSR_READ_4(sc, RE_PHYAR); 404 if (rval & RE_PHYAR_BUSY) 405 break; 406 DELAY(100); 407 } 408 409 if (i == RE_TIMEOUT) { 410 device_printf(dev, "PHY read failed\n"); 411 return(0); 412 } 413 414 return(rval & RE_PHYAR_PHYDATA); 415 } 416 417 static int 418 re_gmii_writereg(device_t dev, int phy, int reg, int data) 419 { 420 struct re_softc *sc = device_get_softc(dev); 421 uint32_t rval; 422 int i; 423 424 CSR_WRITE_4(sc, RE_PHYAR, 425 (reg << 16) | (data & RE_PHYAR_PHYDATA) | RE_PHYAR_BUSY); 426 DELAY(1000); 427 428 for (i = 0; i < RE_TIMEOUT; i++) { 429 rval = CSR_READ_4(sc, RE_PHYAR); 430 if ((rval & RE_PHYAR_BUSY) == 0) 431 break; 432 DELAY(100); 433 } 434 435 if (i == RE_TIMEOUT) 436 device_printf(dev, "PHY write failed\n"); 437 438 return(0); 439 } 440 441 static int 442 re_miibus_readreg(device_t dev, int phy, int reg) 443 { 444 struct re_softc *sc = device_get_softc(dev); 445 uint16_t rval = 0; 446 uint16_t re8139_reg = 0; 447 448 if (sc->re_type == RE_8169) { 449 rval = re_gmii_readreg(dev, phy, reg); 450 return(rval); 451 } 452 453 /* Pretend the internal PHY is only at address 0 */ 454 if (phy) 455 return(0); 456 457 switch(reg) { 458 case MII_BMCR: 459 re8139_reg = RE_BMCR; 460 break; 461 case MII_BMSR: 462 re8139_reg = RE_BMSR; 463 break; 464 case MII_ANAR: 465 re8139_reg = RE_ANAR; 466 break; 467 case MII_ANER: 468 re8139_reg = RE_ANER; 469 break; 470 case MII_ANLPAR: 471 re8139_reg = RE_LPAR; 472 break; 473 case MII_PHYIDR1: 474 case MII_PHYIDR2: 475 return(0); 476 /* 477 * Allow the rlphy driver to read the media status 478 * register. If we have a link partner which does not 479 * support NWAY, this is the register which will tell 480 * us the results of parallel detection. 481 */ 482 case RE_MEDIASTAT: 483 return(CSR_READ_1(sc, RE_MEDIASTAT)); 484 default: 485 device_printf(dev, "bad phy register\n"); 486 return(0); 487 } 488 rval = CSR_READ_2(sc, re8139_reg); 489 if (sc->re_type == RE_8139CPLUS && re8139_reg == RE_BMCR) { 490 /* 8139C+ has different bit layout. */ 491 rval &= ~(BMCR_LOOP | BMCR_ISO); 492 } 493 return(rval); 494 } 495 496 static int 497 re_miibus_writereg(device_t dev, int phy, int reg, int data) 498 { 499 struct re_softc *sc= device_get_softc(dev); 500 u_int16_t re8139_reg = 0; 501 502 if (sc->re_type == RE_8169) 503 return(re_gmii_writereg(dev, phy, reg, data)); 504 505 /* Pretend the internal PHY is only at address 0 */ 506 if (phy) 507 return(0); 508 509 switch(reg) { 510 case MII_BMCR: 511 re8139_reg = RE_BMCR; 512 if (sc->re_type == RE_8139CPLUS) { 513 /* 8139C+ has different bit layout. */ 514 data &= ~(BMCR_LOOP | BMCR_ISO); 515 } 516 break; 517 case MII_BMSR: 518 re8139_reg = RE_BMSR; 519 break; 520 case MII_ANAR: 521 re8139_reg = RE_ANAR; 522 break; 523 case MII_ANER: 524 re8139_reg = RE_ANER; 525 break; 526 case MII_ANLPAR: 527 re8139_reg = RE_LPAR; 528 break; 529 case MII_PHYIDR1: 530 case MII_PHYIDR2: 531 return(0); 532 default: 533 device_printf(dev, "bad phy register\n"); 534 return(0); 535 } 536 CSR_WRITE_2(sc, re8139_reg, data); 537 return(0); 538 } 539 540 static void 541 re_miibus_statchg(device_t dev) 542 { 543 } 544 545 /* 546 * Program the 64-bit multicast hash filter. 547 */ 548 static void 549 re_setmulti(struct re_softc *sc) 550 { 551 struct ifnet *ifp = &sc->arpcom.ac_if; 552 int h = 0; 553 uint32_t hashes[2] = { 0, 0 }; 554 struct ifmultiaddr *ifma; 555 uint32_t rxfilt; 556 int mcnt = 0; 557 558 rxfilt = CSR_READ_4(sc, RE_RXCFG); 559 560 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 561 rxfilt |= RE_RXCFG_RX_MULTI; 562 CSR_WRITE_4(sc, RE_RXCFG, rxfilt); 563 CSR_WRITE_4(sc, RE_MAR0, 0xFFFFFFFF); 564 CSR_WRITE_4(sc, RE_MAR4, 0xFFFFFFFF); 565 return; 566 } 567 568 /* first, zot all the existing hash bits */ 569 CSR_WRITE_4(sc, RE_MAR0, 0); 570 CSR_WRITE_4(sc, RE_MAR4, 0); 571 572 /* now program new ones */ 573 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 574 if (ifma->ifma_addr->sa_family != AF_LINK) 575 continue; 576 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 577 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 578 if (h < 32) 579 hashes[0] |= (1 << h); 580 else 581 hashes[1] |= (1 << (h - 32)); 582 mcnt++; 583 } 584 585 if (mcnt) 586 rxfilt |= RE_RXCFG_RX_MULTI; 587 else 588 rxfilt &= ~RE_RXCFG_RX_MULTI; 589 590 CSR_WRITE_4(sc, RE_RXCFG, rxfilt); 591 592 /* 593 * For some unfathomable reason, RealTek decided to reverse 594 * the order of the multicast hash registers in the PCI Express 595 * parts. This means we have to write the hash pattern in reverse 596 * order for those devices. 597 */ 598 if (sc->re_flags & RE_F_PCIE) { 599 CSR_WRITE_4(sc, RE_MAR0, bswap32(hashes[0])); 600 CSR_WRITE_4(sc, RE_MAR4, bswap32(hashes[1])); 601 } else { 602 CSR_WRITE_4(sc, RE_MAR0, hashes[0]); 603 CSR_WRITE_4(sc, RE_MAR4, hashes[1]); 604 } 605 } 606 607 static void 608 re_reset(struct re_softc *sc) 609 { 610 int i; 611 612 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_RESET); 613 614 for (i = 0; i < RE_TIMEOUT; i++) { 615 DELAY(10); 616 if ((CSR_READ_1(sc, RE_COMMAND) & RE_CMD_RESET) == 0) 617 break; 618 } 619 if (i == RE_TIMEOUT) 620 if_printf(&sc->arpcom.ac_if, "reset never completed!\n"); 621 622 CSR_WRITE_1(sc, 0x82, 1); 623 } 624 625 #ifdef RE_DIAG 626 /* 627 * The following routine is designed to test for a defect on some 628 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 629 * lines connected to the bus, however for a 32-bit only card, they 630 * should be pulled high. The result of this defect is that the 631 * NIC will not work right if you plug it into a 64-bit slot: DMA 632 * operations will be done with 64-bit transfers, which will fail 633 * because the 64-bit data lines aren't connected. 634 * 635 * There's no way to work around this (short of talking a soldering 636 * iron to the board), however we can detect it. The method we use 637 * here is to put the NIC into digital loopback mode, set the receiver 638 * to promiscuous mode, and then try to send a frame. We then compare 639 * the frame data we sent to what was received. If the data matches, 640 * then the NIC is working correctly, otherwise we know the user has 641 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 642 * slot. In the latter case, there's no way the NIC can work correctly, 643 * so we print out a message on the console and abort the device attach. 644 */ 645 646 static int 647 re_diag(struct re_softc *sc) 648 { 649 struct ifnet *ifp = &sc->arpcom.ac_if; 650 struct mbuf *m0; 651 struct ether_header *eh; 652 struct re_desc *cur_rx; 653 uint16_t status; 654 uint32_t rxstat; 655 int total_len, i, error = 0, phyaddr; 656 uint8_t dst[ETHER_ADDR_LEN] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 657 uint8_t src[ETHER_ADDR_LEN] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 658 659 /* Allocate a single mbuf */ 660 661 MGETHDR(m0, MB_DONTWAIT, MT_DATA); 662 if (m0 == NULL) 663 return(ENOBUFS); 664 665 /* 666 * Initialize the NIC in test mode. This sets the chip up 667 * so that it can send and receive frames, but performs the 668 * following special functions: 669 * - Puts receiver in promiscuous mode 670 * - Enables digital loopback mode 671 * - Leaves interrupts turned off 672 */ 673 674 ifp->if_flags |= IFF_PROMISC; 675 sc->re_testmode = 1; 676 re_reset(sc); 677 re_init(sc); 678 sc->re_link = 1; 679 if (sc->re_type == RE_8169) 680 phyaddr = 1; 681 else 682 phyaddr = 0; 683 684 re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_RESET); 685 for (i = 0; i < RE_TIMEOUT; i++) { 686 status = re_miibus_readreg(sc->re_dev, phyaddr, MII_BMCR); 687 if (!(status & BMCR_RESET)) 688 break; 689 } 690 691 re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_LOOP); 692 CSR_WRITE_2(sc, RE_ISR, RE_INTRS_DIAG); 693 694 DELAY(100000); 695 696 /* Put some data in the mbuf */ 697 698 eh = mtod(m0, struct ether_header *); 699 bcopy (dst, eh->ether_dhost, ETHER_ADDR_LEN); 700 bcopy (src, eh->ether_shost, ETHER_ADDR_LEN); 701 eh->ether_type = htons(ETHERTYPE_IP); 702 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 703 704 /* 705 * Queue the packet, start transmission. 706 * Note: ifq_handoff() ultimately calls re_start() for us. 707 */ 708 709 CSR_WRITE_2(sc, RE_ISR, 0xFFFF); 710 error = ifq_handoff(ifp, m0, NULL); 711 if (error) { 712 m0 = NULL; 713 goto done; 714 } 715 m0 = NULL; 716 717 /* Wait for it to propagate through the chip */ 718 719 DELAY(100000); 720 for (i = 0; i < RE_TIMEOUT; i++) { 721 status = CSR_READ_2(sc, RE_ISR); 722 CSR_WRITE_2(sc, RE_ISR, status); 723 if ((status & (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) == 724 (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) 725 break; 726 DELAY(10); 727 } 728 729 if (i == RE_TIMEOUT) { 730 if_printf(ifp, "diagnostic failed to receive packet " 731 "in loopback mode\n"); 732 error = EIO; 733 goto done; 734 } 735 736 /* 737 * The packet should have been dumped into the first 738 * entry in the RX DMA ring. Grab it from there. 739 */ 740 741 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 742 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD); 743 bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0], 744 BUS_DMASYNC_POSTWRITE); 745 bus_dmamap_unload(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0]); 746 747 m0 = sc->re_ldata.re_rx_mbuf[0]; 748 sc->re_ldata.re_rx_mbuf[0] = NULL; 749 eh = mtod(m0, struct ether_header *); 750 751 cur_rx = &sc->re_ldata.re_rx_list[0]; 752 total_len = RE_RXBYTES(cur_rx); 753 rxstat = le32toh(cur_rx->re_cmdstat); 754 755 if (total_len != ETHER_MIN_LEN) { 756 if_printf(ifp, "diagnostic failed, received short packet\n"); 757 error = EIO; 758 goto done; 759 } 760 761 /* Test that the received packet data matches what we sent. */ 762 763 if (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) || 764 bcmp(eh->ether_shost, &src, ETHER_ADDR_LEN) || 765 be16toh(eh->ether_type) != ETHERTYPE_IP) { 766 if_printf(ifp, "WARNING, DMA FAILURE!\n"); 767 if_printf(ifp, "expected TX data: %6D/%6D/0x%x\n", 768 dst, ":", src, ":", ETHERTYPE_IP); 769 if_printf(ifp, "received RX data: %6D/%6D/0x%x\n", 770 eh->ether_dhost, ":", eh->ether_shost, ":", 771 ntohs(eh->ether_type)); 772 if_printf(ifp, "You may have a defective 32-bit NIC plugged " 773 "into a 64-bit PCI slot.\n"); 774 if_printf(ifp, "Please re-install the NIC in a 32-bit slot " 775 "for proper operation.\n"); 776 if_printf(ifp, "Read the re(4) man page for more details.\n"); 777 error = EIO; 778 } 779 780 done: 781 /* Turn interface off, release resources */ 782 783 sc->re_testmode = 0; 784 sc->re_link = 0; 785 ifp->if_flags &= ~IFF_PROMISC; 786 re_stop(sc); 787 if (m0 != NULL) 788 m_freem(m0); 789 790 return (error); 791 } 792 #endif /* RE_DIAG */ 793 794 /* 795 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 796 * IDs against our list and return a device name if we find a match. 797 */ 798 static int 799 re_probe(device_t dev) 800 { 801 const struct re_type *t; 802 struct re_softc *sc; 803 int rid; 804 uint32_t hwrev; 805 uint16_t vendor, product; 806 807 t = re_devs; 808 809 vendor = pci_get_vendor(dev); 810 product = pci_get_device(dev); 811 812 /* 813 * Only attach to rev.3 of the Linksys EG1032 adapter. 814 * Rev.2 is supported by sk(4). 815 */ 816 if (vendor == PCI_VENDOR_LINKSYS && 817 product == PCI_PRODUCT_LINKSYS_EG1032 && 818 pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3) 819 return ENXIO; 820 821 for (t = re_devs; t->re_name != NULL; t++) { 822 if (product == t->re_did && vendor == t->re_vid) 823 break; 824 } 825 826 /* 827 * Check if we found a RealTek device. 828 */ 829 if (t->re_name == NULL) 830 return(ENXIO); 831 832 /* 833 * Temporarily map the I/O space so we can read the chip ID register. 834 */ 835 sc = kmalloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO); 836 rid = RE_PCI_LOIO; 837 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 838 RF_ACTIVE); 839 if (sc->re_res == NULL) { 840 device_printf(dev, "couldn't map ports/memory\n"); 841 kfree(sc, M_TEMP); 842 return(ENXIO); 843 } 844 845 sc->re_btag = rman_get_bustag(sc->re_res); 846 sc->re_bhandle = rman_get_bushandle(sc->re_res); 847 848 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV; 849 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, sc->re_res); 850 kfree(sc, M_TEMP); 851 852 /* 853 * and continue matching for the specific chip... 854 */ 855 for (; t->re_name != NULL; t++) { 856 if (product == t->re_did && vendor == t->re_vid && 857 t->re_basetype == hwrev) { 858 device_set_desc(dev, t->re_name); 859 return(0); 860 } 861 } 862 863 if (bootverbose) 864 kprintf("re: unknown hwrev %#x\n", hwrev); 865 return(ENXIO); 866 } 867 868 /* 869 * This routine takes the segment list provided as the result of 870 * a bus_dma_map_load() operation and assigns the addresses/lengths 871 * to RealTek DMA descriptors. This can be called either by the RX 872 * code or the TX code. In the RX case, we'll probably wind up mapping 873 * at most one segment. For the TX case, there could be any number of 874 * segments since TX packets may span multiple mbufs. In either case, 875 * if the number of segments is larger than the re_maxsegs limit 876 * specified by the caller, we abort the mapping operation. Sadly, 877 * whoever designed the buffer mapping API did not provide a way to 878 * return an error from here, so we have to fake it a bit. 879 */ 880 881 static void 882 re_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, 883 bus_size_t mapsize, int error) 884 { 885 struct re_dmaload_arg *ctx; 886 struct re_desc *d = NULL; 887 int i = 0, idx; 888 uint32_t cmdstat; 889 890 if (error) 891 return; 892 893 ctx = arg; 894 895 /* Signal error to caller if there's too many segments */ 896 if (nseg > ctx->re_maxsegs) { 897 ctx->re_maxsegs = 0; 898 return; 899 } 900 901 /* 902 * Map the segment array into descriptors. Note that we set the 903 * start-of-frame and end-of-frame markers for either TX or RX, but 904 * they really only have meaning in the TX case. (In the RX case, 905 * it's the chip that tells us where packets begin and end.) 906 * We also keep track of the end of the ring and set the 907 * end-of-ring bits as needed, and we set the ownership bits 908 * in all except the very first descriptor. (The caller will 909 * set this descriptor later when it start transmission or 910 * reception.) 911 */ 912 idx = ctx->re_idx; 913 for (;;) { 914 d = &ctx->re_ring[idx]; 915 if (le32toh(d->re_cmdstat) & RE_RDESC_STAT_OWN) { 916 ctx->re_maxsegs = 0; 917 return; 918 } 919 cmdstat = segs[i].ds_len; 920 d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr)); 921 d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr)); 922 if (i == 0) 923 cmdstat |= RE_TDESC_CMD_SOF; 924 else 925 cmdstat |= RE_TDESC_CMD_OWN; 926 if (idx == (RE_RX_DESC_CNT - 1)) 927 cmdstat |= RE_TDESC_CMD_EOR; 928 d->re_cmdstat = htole32(cmdstat | ctx->re_flags); 929 i++; 930 if (i == nseg) 931 break; 932 RE_DESC_INC(idx); 933 } 934 935 d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF); 936 ctx->re_maxsegs = nseg; 937 ctx->re_idx = idx; 938 } 939 940 /* 941 * Map a single buffer address. 942 */ 943 944 static void 945 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 946 { 947 uint32_t *addr; 948 949 if (error) 950 return; 951 952 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 953 addr = arg; 954 *addr = segs->ds_addr; 955 } 956 957 static int 958 re_allocmem(device_t dev, struct re_softc *sc) 959 { 960 int error, i, nseg; 961 962 /* 963 * Allocate map for RX mbufs. 964 */ 965 nseg = 32; 966 error = bus_dma_tag_create(sc->re_parent_tag, ETHER_ALIGN, 0, 967 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 968 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, 969 &sc->re_ldata.re_mtag); 970 if (error) { 971 device_printf(dev, "could not allocate dma tag\n"); 972 return(error); 973 } 974 975 /* 976 * Allocate map for TX descriptor list. 977 */ 978 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN, 979 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 980 NULL, RE_TX_LIST_SZ, 1, RE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, 981 &sc->re_ldata.re_tx_list_tag); 982 if (error) { 983 device_printf(dev, "could not allocate dma tag\n"); 984 return(error); 985 } 986 987 /* Allocate DMA'able memory for the TX ring */ 988 989 error = bus_dmamem_alloc(sc->re_ldata.re_tx_list_tag, 990 (void **)&sc->re_ldata.re_tx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO, 991 &sc->re_ldata.re_tx_list_map); 992 if (error) { 993 device_printf(dev, "could not allocate TX ring\n"); 994 return(error); 995 } 996 997 /* Load the map for the TX ring. */ 998 999 error = bus_dmamap_load(sc->re_ldata.re_tx_list_tag, 1000 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list, 1001 RE_TX_LIST_SZ, re_dma_map_addr, 1002 &sc->re_ldata.re_tx_list_addr, BUS_DMA_NOWAIT); 1003 if (error) { 1004 device_printf(dev, "could not get address of TX ring\n"); 1005 return(error); 1006 } 1007 1008 /* Create DMA maps for TX buffers */ 1009 1010 for (i = 0; i < RE_TX_DESC_CNT; i++) { 1011 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 1012 &sc->re_ldata.re_tx_dmamap[i]); 1013 if (error) { 1014 device_printf(dev, "can't create DMA map for TX\n"); 1015 return(error); 1016 } 1017 } 1018 1019 /* 1020 * Allocate map for RX descriptor list. 1021 */ 1022 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN, 1023 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1024 NULL, RE_RX_LIST_SZ, 1, RE_RX_LIST_SZ, BUS_DMA_ALLOCNOW, 1025 &sc->re_ldata.re_rx_list_tag); 1026 if (error) { 1027 device_printf(dev, "could not allocate dma tag\n"); 1028 return(error); 1029 } 1030 1031 /* Allocate DMA'able memory for the RX ring */ 1032 1033 error = bus_dmamem_alloc(sc->re_ldata.re_rx_list_tag, 1034 (void **)&sc->re_ldata.re_rx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1035 &sc->re_ldata.re_rx_list_map); 1036 if (error) { 1037 device_printf(dev, "could not allocate RX ring\n"); 1038 return(error); 1039 } 1040 1041 /* Load the map for the RX ring. */ 1042 1043 error = bus_dmamap_load(sc->re_ldata.re_rx_list_tag, 1044 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list, 1045 RE_RX_LIST_SZ, re_dma_map_addr, 1046 &sc->re_ldata.re_rx_list_addr, BUS_DMA_NOWAIT); 1047 if (error) { 1048 device_printf(dev, "could not get address of RX ring\n"); 1049 return(error); 1050 } 1051 1052 /* Create DMA maps for RX buffers */ 1053 1054 for (i = 0; i < RE_RX_DESC_CNT; i++) { 1055 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 1056 &sc->re_ldata.re_rx_dmamap[i]); 1057 if (error) { 1058 device_printf(dev, "can't create DMA map for RX\n"); 1059 return(ENOMEM); 1060 } 1061 } 1062 1063 return(0); 1064 } 1065 1066 /* 1067 * Attach the interface. Allocate softc structures, do ifmedia 1068 * setup and ethernet/BPF attach. 1069 */ 1070 static int 1071 re_attach(device_t dev) 1072 { 1073 struct re_softc *sc = device_get_softc(dev); 1074 struct ifnet *ifp; 1075 const struct re_hwrev *hw_rev; 1076 uint8_t eaddr[ETHER_ADDR_LEN]; 1077 uint16_t as[ETHER_ADDR_LEN / 2]; 1078 uint16_t re_did = 0; 1079 uint32_t hwrev; 1080 int error = 0, rid, i; 1081 1082 callout_init(&sc->re_timer); 1083 #ifdef RE_DIAG 1084 sc->re_dev = dev; 1085 #endif 1086 1087 RE_ENABLE_TX_MODERATION(sc); 1088 1089 sysctl_ctx_init(&sc->re_sysctl_ctx); 1090 sc->re_sysctl_tree = SYSCTL_ADD_NODE(&sc->re_sysctl_ctx, 1091 SYSCTL_STATIC_CHILDREN(_hw), 1092 OID_AUTO, 1093 device_get_nameunit(dev), 1094 CTLFLAG_RD, 0, ""); 1095 if (sc->re_sysctl_tree == NULL) { 1096 device_printf(dev, "can't add sysctl node\n"); 1097 error = ENXIO; 1098 goto fail; 1099 } 1100 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx, 1101 SYSCTL_CHILDREN(sc->re_sysctl_tree), 1102 OID_AUTO, "tx_moderation", 1103 CTLTYPE_INT | CTLFLAG_RW, 1104 sc, 0, re_sysctl_tx_moderation, "I", 1105 "Enable/Disable TX moderation"); 1106 1107 #ifndef BURN_BRIDGES 1108 /* 1109 * Handle power management nonsense. 1110 */ 1111 1112 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1113 uint32_t membase, irq; 1114 1115 /* Save important PCI config data. */ 1116 membase = pci_read_config(dev, RE_PCI_LOMEM, 4); 1117 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1118 1119 /* Reset the power state. */ 1120 device_printf(dev, "chip is in D%d power mode " 1121 "-- setting to D0\n", pci_get_powerstate(dev)); 1122 1123 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1124 1125 /* Restore PCI config data. */ 1126 pci_write_config(dev, RE_PCI_LOMEM, membase, 4); 1127 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1128 } 1129 #endif 1130 /* 1131 * Map control/status registers. 1132 */ 1133 pci_enable_busmaster(dev); 1134 1135 rid = RE_PCI_LOIO; 1136 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 1137 RF_ACTIVE); 1138 1139 if (sc->re_res == NULL) { 1140 device_printf(dev, "couldn't map ports\n"); 1141 error = ENXIO; 1142 goto fail; 1143 } 1144 1145 sc->re_btag = rman_get_bustag(sc->re_res); 1146 sc->re_bhandle = rman_get_bushandle(sc->re_res); 1147 1148 /* Allocate interrupt */ 1149 rid = 0; 1150 sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1151 RF_SHAREABLE | RF_ACTIVE); 1152 1153 if (sc->re_irq == NULL) { 1154 device_printf(dev, "couldn't map interrupt\n"); 1155 error = ENXIO; 1156 goto fail; 1157 } 1158 1159 /* Reset the adapter. */ 1160 re_reset(sc); 1161 1162 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV; 1163 for (hw_rev = re_hwrevs; hw_rev->re_desc != NULL; hw_rev++) { 1164 if (hw_rev->re_rev == hwrev) { 1165 sc->re_type = hw_rev->re_type; 1166 sc->re_flags = hw_rev->re_flags; 1167 break; 1168 } 1169 } 1170 1171 sc->re_eewidth = 6; 1172 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1173 if (re_did != 0x8129) 1174 sc->re_eewidth = 8; 1175 1176 /* 1177 * Get station address from the EEPROM. 1178 */ 1179 re_read_eeprom(sc, (caddr_t)as, RE_EE_EADDR, 3); 1180 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1181 as[i] = le16toh(as[i]); 1182 bcopy(as, eaddr, sizeof(eaddr)); 1183 1184 if (sc->re_type == RE_8169) { 1185 /* Set RX length mask */ 1186 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN; 1187 sc->re_txstart = RE_GTXSTART; 1188 } else { 1189 /* Set RX length mask */ 1190 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN; 1191 sc->re_txstart = RE_TXSTART; 1192 } 1193 1194 /* 1195 * Allocate the parent bus DMA tag appropriate for PCI. 1196 */ 1197 #define RE_NSEG_NEW 32 1198 error = bus_dma_tag_create(NULL, /* parent */ 1199 1, 0, /* alignment, boundary */ 1200 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1201 BUS_SPACE_MAXADDR, /* highaddr */ 1202 NULL, NULL, /* filter, filterarg */ 1203 MAXBSIZE, RE_NSEG_NEW, /* maxsize, nsegments */ 1204 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1205 BUS_DMA_ALLOCNOW, /* flags */ 1206 &sc->re_parent_tag); 1207 if (error) 1208 goto fail; 1209 1210 error = re_allocmem(dev, sc); 1211 1212 if (error) 1213 goto fail; 1214 1215 /* Do MII setup */ 1216 if (mii_phy_probe(dev, &sc->re_miibus, 1217 re_ifmedia_upd, re_ifmedia_sts)) { 1218 device_printf(dev, "MII without any phy!\n"); 1219 error = ENXIO; 1220 goto fail; 1221 } 1222 1223 ifp = &sc->arpcom.ac_if; 1224 ifp->if_softc = sc; 1225 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1226 ifp->if_mtu = ETHERMTU; 1227 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1228 ifp->if_ioctl = re_ioctl; 1229 ifp->if_start = re_start; 1230 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1231 if (hwrev != RE_HWREV_8168C) /* XXX does not work yet */ 1232 ifp->if_capabilities |= IFCAP_HWCSUM; 1233 #ifdef DEVICE_POLLING 1234 ifp->if_poll = re_poll; 1235 #endif 1236 ifp->if_watchdog = re_watchdog; 1237 ifp->if_init = re_init; 1238 if (sc->re_type == RE_8169) 1239 ifp->if_baudrate = 1000000000; 1240 else 1241 ifp->if_baudrate = 100000000; 1242 ifq_set_maxlen(&ifp->if_snd, RE_IFQ_MAXLEN); 1243 ifq_set_ready(&ifp->if_snd); 1244 1245 #ifdef RE_DISABLE_HWCSUM 1246 ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM; 1247 ifp->if_hwassist = 0; 1248 #else 1249 ifp->if_capenable = ifp->if_capabilities; 1250 if (ifp->if_capabilities & IFCAP_HWCSUM) 1251 ifp->if_hwassist = RE_CSUM_FEATURES; 1252 else 1253 ifp->if_hwassist = 0; 1254 #endif /* RE_DISABLE_HWCSUM */ 1255 1256 /* 1257 * Call MI attach routine. 1258 */ 1259 ether_ifattach(ifp, eaddr, NULL); 1260 1261 #ifdef RE_DIAG 1262 /* 1263 * Perform hardware diagnostic on the original RTL8169. 1264 * Some 32-bit cards were incorrectly wired and would 1265 * malfunction if plugged into a 64-bit slot. 1266 */ 1267 if (hwrev == RE_HWREV_8169) { 1268 lwkt_serialize_enter(ifp->if_serializer); 1269 error = re_diag(sc); 1270 lwkt_serialize_exit(ifp->if_serializer); 1271 1272 if (error) { 1273 device_printf(dev, "hardware diagnostic failure\n"); 1274 ether_ifdetach(ifp); 1275 goto fail; 1276 } 1277 } 1278 #endif /* RE_DIAG */ 1279 1280 /* Hook interrupt last to avoid having to lock softc */ 1281 error = bus_setup_intr(dev, sc->re_irq, INTR_NETSAFE, re_intr, sc, 1282 &sc->re_intrhand, ifp->if_serializer); 1283 1284 if (error) { 1285 device_printf(dev, "couldn't set up irq\n"); 1286 ether_ifdetach(ifp); 1287 goto fail; 1288 } 1289 1290 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->re_irq)); 1291 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 1292 1293 fail: 1294 if (error) 1295 re_detach(dev); 1296 1297 return (error); 1298 } 1299 1300 /* 1301 * Shutdown hardware and free up resources. This can be called any 1302 * time after the mutex has been initialized. It is called in both 1303 * the error case in attach and the normal detach case so it needs 1304 * to be careful about only freeing resources that have actually been 1305 * allocated. 1306 */ 1307 static int 1308 re_detach(device_t dev) 1309 { 1310 struct re_softc *sc = device_get_softc(dev); 1311 struct ifnet *ifp = &sc->arpcom.ac_if; 1312 int i; 1313 1314 /* These should only be active if attach succeeded */ 1315 if (device_is_attached(dev)) { 1316 lwkt_serialize_enter(ifp->if_serializer); 1317 re_stop(sc); 1318 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand); 1319 lwkt_serialize_exit(ifp->if_serializer); 1320 1321 ether_ifdetach(ifp); 1322 } 1323 if (sc->re_miibus) 1324 device_delete_child(dev, sc->re_miibus); 1325 bus_generic_detach(dev); 1326 1327 if (sc->re_irq) 1328 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->re_irq); 1329 if (sc->re_res) { 1330 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, 1331 sc->re_res); 1332 } 1333 1334 /* Unload and free the RX DMA ring memory and map */ 1335 1336 if (sc->re_ldata.re_rx_list_tag) { 1337 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag, 1338 sc->re_ldata.re_rx_list_map); 1339 bus_dmamem_free(sc->re_ldata.re_rx_list_tag, 1340 sc->re_ldata.re_rx_list, 1341 sc->re_ldata.re_rx_list_map); 1342 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 1343 } 1344 1345 /* Unload and free the TX DMA ring memory and map */ 1346 1347 if (sc->re_ldata.re_tx_list_tag) { 1348 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag, 1349 sc->re_ldata.re_tx_list_map); 1350 bus_dmamem_free(sc->re_ldata.re_tx_list_tag, 1351 sc->re_ldata.re_tx_list, 1352 sc->re_ldata.re_tx_list_map); 1353 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 1354 } 1355 1356 /* Destroy all the RX and TX buffer maps */ 1357 1358 if (sc->re_ldata.re_mtag) { 1359 for (i = 0; i < RE_TX_DESC_CNT; i++) 1360 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1361 sc->re_ldata.re_tx_dmamap[i]); 1362 for (i = 0; i < RE_RX_DESC_CNT; i++) 1363 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1364 sc->re_ldata.re_rx_dmamap[i]); 1365 bus_dma_tag_destroy(sc->re_ldata.re_mtag); 1366 } 1367 1368 /* Unload and free the stats buffer and map */ 1369 1370 if (sc->re_ldata.re_stag) { 1371 bus_dmamap_unload(sc->re_ldata.re_stag, 1372 sc->re_ldata.re_rx_list_map); 1373 bus_dmamem_free(sc->re_ldata.re_stag, 1374 sc->re_ldata.re_stats, 1375 sc->re_ldata.re_smap); 1376 bus_dma_tag_destroy(sc->re_ldata.re_stag); 1377 } 1378 1379 if (sc->re_parent_tag) 1380 bus_dma_tag_destroy(sc->re_parent_tag); 1381 1382 return(0); 1383 } 1384 1385 static int 1386 re_newbuf(struct re_softc *sc, int idx, struct mbuf *m) 1387 { 1388 struct re_dmaload_arg arg; 1389 struct mbuf *n = NULL; 1390 int error; 1391 1392 if (m == NULL) { 1393 n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 1394 if (n == NULL) 1395 return(ENOBUFS); 1396 m = n; 1397 } else 1398 m->m_data = m->m_ext.ext_buf; 1399 1400 m->m_len = m->m_pkthdr.len = MCLBYTES; 1401 1402 /* 1403 * NOTE: 1404 * Some re(4) chips(e.g. RTL8101E) need address of the receive buffer 1405 * to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here. 1406 */ 1407 1408 arg.sc = sc; 1409 arg.re_idx = idx; 1410 arg.re_maxsegs = 1; 1411 arg.re_flags = 0; 1412 arg.re_ring = sc->re_ldata.re_rx_list; 1413 1414 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, 1415 sc->re_ldata.re_rx_dmamap[idx], m, re_dma_map_desc, 1416 &arg, BUS_DMA_NOWAIT); 1417 if (error || arg.re_maxsegs != 1) { 1418 if (n != NULL) 1419 m_freem(n); 1420 return (ENOMEM); 1421 } 1422 1423 sc->re_ldata.re_rx_list[idx].re_cmdstat |= htole32(RE_RDESC_CMD_OWN); 1424 sc->re_ldata.re_rx_mbuf[idx] = m; 1425 1426 bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[idx], 1427 BUS_DMASYNC_PREREAD); 1428 1429 return(0); 1430 } 1431 1432 static int 1433 re_tx_list_init(struct re_softc *sc) 1434 { 1435 bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ); 1436 bzero(&sc->re_ldata.re_tx_mbuf, RE_TX_DESC_CNT * sizeof(struct mbuf *)); 1437 1438 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1439 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE); 1440 sc->re_ldata.re_tx_prodidx = 0; 1441 sc->re_ldata.re_tx_considx = 0; 1442 sc->re_ldata.re_tx_free = RE_TX_DESC_CNT; 1443 1444 return(0); 1445 } 1446 1447 static int 1448 re_rx_list_init(struct re_softc *sc) 1449 { 1450 int i, error; 1451 1452 bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ); 1453 bzero(&sc->re_ldata.re_rx_mbuf, RE_RX_DESC_CNT * sizeof(struct mbuf *)); 1454 1455 for (i = 0; i < RE_RX_DESC_CNT; i++) { 1456 error = re_newbuf(sc, i, NULL); 1457 if (error) 1458 return(error); 1459 } 1460 1461 /* Flush the RX descriptors */ 1462 1463 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1464 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE); 1465 1466 sc->re_ldata.re_rx_prodidx = 0; 1467 sc->re_head = sc->re_tail = NULL; 1468 1469 return(0); 1470 } 1471 1472 /* 1473 * RX handler for C+ and 8169. For the gigE chips, we support 1474 * the reception of jumbo frames that have been fragmented 1475 * across multiple 2K mbuf cluster buffers. 1476 */ 1477 static void 1478 re_rxeof(struct re_softc *sc) 1479 { 1480 struct ifnet *ifp = &sc->arpcom.ac_if; 1481 struct mbuf *m; 1482 struct re_desc *cur_rx; 1483 uint32_t rxstat, rxvlan; 1484 int i, total_len; 1485 #ifdef ETHER_INPUT_CHAIN 1486 struct mbuf_chain chain[MAXCPU]; 1487 #endif 1488 1489 /* Invalidate the descriptor memory */ 1490 1491 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1492 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD); 1493 1494 #ifdef ETHER_INPUT_CHAIN 1495 ether_input_chain_init(chain); 1496 #endif 1497 1498 for (i = sc->re_ldata.re_rx_prodidx; 1499 RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0 ; RE_DESC_INC(i)) { 1500 cur_rx = &sc->re_ldata.re_rx_list[i]; 1501 m = sc->re_ldata.re_rx_mbuf[i]; 1502 total_len = RE_RXBYTES(cur_rx); 1503 rxstat = le32toh(cur_rx->re_cmdstat); 1504 rxvlan = le32toh(cur_rx->re_vlanctl); 1505 1506 /* Invalidate the RX mbuf and unload its map */ 1507 1508 bus_dmamap_sync(sc->re_ldata.re_mtag, 1509 sc->re_ldata.re_rx_dmamap[i], 1510 BUS_DMASYNC_POSTWRITE); 1511 bus_dmamap_unload(sc->re_ldata.re_mtag, 1512 sc->re_ldata.re_rx_dmamap[i]); 1513 1514 if ((rxstat & RE_RDESC_STAT_EOF) == 0) { 1515 m->m_len = MCLBYTES - ETHER_ALIGN; 1516 if (sc->re_head == NULL) { 1517 sc->re_head = sc->re_tail = m; 1518 } else { 1519 sc->re_tail->m_next = m; 1520 sc->re_tail = m; 1521 } 1522 re_newbuf(sc, i, NULL); 1523 continue; 1524 } 1525 1526 /* 1527 * NOTE: for the 8139C+, the frame length field 1528 * is always 12 bits in size, but for the gigE chips, 1529 * it is 13 bits (since the max RX frame length is 16K). 1530 * Unfortunately, all 32 bits in the status word 1531 * were already used, so to make room for the extra 1532 * length bit, RealTek took out the 'frame alignment 1533 * error' bit and shifted the other status bits 1534 * over one slot. The OWN, EOR, FS and LS bits are 1535 * still in the same places. We have already extracted 1536 * the frame length and checked the OWN bit, so rather 1537 * than using an alternate bit mapping, we shift the 1538 * status bits one space to the right so we can evaluate 1539 * them using the 8169 status as though it was in the 1540 * same format as that of the 8139C+. 1541 */ 1542 if (sc->re_type == RE_8169) 1543 rxstat >>= 1; 1544 1545 if (rxstat & RE_RDESC_STAT_RXERRSUM) { 1546 ifp->if_ierrors++; 1547 /* 1548 * If this is part of a multi-fragment packet, 1549 * discard all the pieces. 1550 */ 1551 if (sc->re_head != NULL) { 1552 m_freem(sc->re_head); 1553 sc->re_head = sc->re_tail = NULL; 1554 } 1555 re_newbuf(sc, i, m); 1556 continue; 1557 } 1558 1559 /* 1560 * If allocating a replacement mbuf fails, 1561 * reload the current one. 1562 */ 1563 1564 if (re_newbuf(sc, i, NULL)) { 1565 ifp->if_ierrors++; 1566 if (sc->re_head != NULL) { 1567 m_freem(sc->re_head); 1568 sc->re_head = sc->re_tail = NULL; 1569 } 1570 re_newbuf(sc, i, m); 1571 continue; 1572 } 1573 1574 if (sc->re_head != NULL) { 1575 m->m_len = total_len % (MCLBYTES - ETHER_ALIGN); 1576 /* 1577 * Special case: if there's 4 bytes or less 1578 * in this buffer, the mbuf can be discarded: 1579 * the last 4 bytes is the CRC, which we don't 1580 * care about anyway. 1581 */ 1582 if (m->m_len <= ETHER_CRC_LEN) { 1583 sc->re_tail->m_len -= 1584 (ETHER_CRC_LEN - m->m_len); 1585 m_freem(m); 1586 } else { 1587 m->m_len -= ETHER_CRC_LEN; 1588 sc->re_tail->m_next = m; 1589 } 1590 m = sc->re_head; 1591 sc->re_head = sc->re_tail = NULL; 1592 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1593 } else 1594 m->m_pkthdr.len = m->m_len = 1595 (total_len - ETHER_CRC_LEN); 1596 1597 ifp->if_ipackets++; 1598 m->m_pkthdr.rcvif = ifp; 1599 1600 /* Do RX checksumming if enabled */ 1601 1602 if (ifp->if_capenable & IFCAP_RXCSUM) { 1603 1604 /* Check IP header checksum */ 1605 if (rxstat & RE_RDESC_STAT_PROTOID) 1606 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1607 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0) 1608 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1609 1610 /* Check TCP/UDP checksum */ 1611 if ((RE_TCPPKT(rxstat) && 1612 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) || 1613 (RE_UDPPKT(rxstat) && 1614 (rxstat & RE_RDESC_STAT_UDPSUMBAD)) == 0) { 1615 m->m_pkthdr.csum_flags |= 1616 CSUM_DATA_VALID|CSUM_PSEUDO_HDR| 1617 CSUM_FRAG_NOT_CHECKED; 1618 m->m_pkthdr.csum_data = 0xffff; 1619 } 1620 } 1621 1622 if (rxvlan & RE_RDESC_VLANCTL_TAG) { 1623 m->m_flags |= M_VLANTAG; 1624 m->m_pkthdr.ether_vlantag = 1625 be16toh((rxvlan & RE_RDESC_VLANCTL_DATA)); 1626 } 1627 #ifdef ETHER_INPUT_CHAIN 1628 #ifdef ETHER_INPUT2 1629 ether_input_chain2(ifp, m, chain); 1630 #else 1631 ether_input_chain(ifp, m, chain); 1632 #endif 1633 #else 1634 ifp->if_input(ifp, m); 1635 #endif 1636 } 1637 1638 #ifdef ETHER_INPUT_CHAIN 1639 ether_input_dispatch(chain); 1640 #endif 1641 1642 /* Flush the RX DMA ring */ 1643 1644 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1645 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE); 1646 1647 sc->re_ldata.re_rx_prodidx = i; 1648 } 1649 1650 static void 1651 re_txeof(struct re_softc *sc) 1652 { 1653 struct ifnet *ifp = &sc->arpcom.ac_if; 1654 uint32_t txstat; 1655 int idx; 1656 1657 /* Invalidate the TX descriptor list */ 1658 1659 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1660 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_POSTREAD); 1661 1662 for (idx = sc->re_ldata.re_tx_considx; 1663 sc->re_ldata.re_tx_free < RE_TX_DESC_CNT; RE_DESC_INC(idx)) { 1664 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat); 1665 if (txstat & RE_TDESC_CMD_OWN) 1666 break; 1667 1668 sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0; 1669 1670 /* 1671 * We only stash mbufs in the last descriptor 1672 * in a fragment chain, which also happens to 1673 * be the only place where the TX status bits 1674 * are valid. 1675 */ 1676 if (txstat & RE_TDESC_CMD_EOF) { 1677 m_freem(sc->re_ldata.re_tx_mbuf[idx]); 1678 sc->re_ldata.re_tx_mbuf[idx] = NULL; 1679 bus_dmamap_unload(sc->re_ldata.re_mtag, 1680 sc->re_ldata.re_tx_dmamap[idx]); 1681 if (txstat & (RE_TDESC_STAT_EXCESSCOL| 1682 RE_TDESC_STAT_COLCNT)) 1683 ifp->if_collisions++; 1684 if (txstat & RE_TDESC_STAT_TXERRSUM) 1685 ifp->if_oerrors++; 1686 else 1687 ifp->if_opackets++; 1688 } 1689 sc->re_ldata.re_tx_free++; 1690 } 1691 1692 /* No changes made to the TX ring, so no flush needed */ 1693 if (sc->re_ldata.re_tx_free) { 1694 sc->re_ldata.re_tx_considx = idx; 1695 ifp->if_flags &= ~IFF_OACTIVE; 1696 ifp->if_timer = 0; 1697 } 1698 1699 /* 1700 * Some chips will ignore a second TX request issued while an 1701 * existing transmission is in progress. If the transmitter goes 1702 * idle but there are still packets waiting to be sent, we need 1703 * to restart the channel here to flush them out. This only seems 1704 * to be required with the PCIe devices. 1705 */ 1706 if (sc->re_ldata.re_tx_free < RE_TX_DESC_CNT) 1707 CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START); 1708 1709 /* 1710 * If not all descriptors have been released reaped yet, 1711 * reload the timer so that we will eventually get another 1712 * interrupt that will cause us to re-enter this routine. 1713 * This is done in case the transmitter has gone idle. 1714 */ 1715 if (RE_TX_MODERATION_IS_ENABLED(sc) && 1716 sc->re_ldata.re_tx_free < RE_TX_DESC_CNT) 1717 CSR_WRITE_4(sc, RE_TIMERCNT, 1); 1718 } 1719 1720 static void 1721 re_tick(void *xsc) 1722 { 1723 struct re_softc *sc = xsc; 1724 1725 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer); 1726 re_tick_serialized(xsc); 1727 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer); 1728 } 1729 1730 static void 1731 re_tick_serialized(void *xsc) 1732 { 1733 struct re_softc *sc = xsc; 1734 struct ifnet *ifp = &sc->arpcom.ac_if; 1735 struct mii_data *mii; 1736 1737 mii = device_get_softc(sc->re_miibus); 1738 mii_tick(mii); 1739 if (sc->re_link) { 1740 if (!(mii->mii_media_status & IFM_ACTIVE)) 1741 sc->re_link = 0; 1742 } else { 1743 if (mii->mii_media_status & IFM_ACTIVE && 1744 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1745 sc->re_link = 1; 1746 if (!ifq_is_empty(&ifp->if_snd)) 1747 if_devstart(ifp); 1748 } 1749 } 1750 1751 callout_reset(&sc->re_timer, hz, re_tick, sc); 1752 } 1753 1754 #ifdef DEVICE_POLLING 1755 1756 static void 1757 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1758 { 1759 struct re_softc *sc = ifp->if_softc; 1760 1761 switch(cmd) { 1762 case POLL_REGISTER: 1763 /* disable interrupts */ 1764 CSR_WRITE_2(sc, RE_IMR, 0x0000); 1765 break; 1766 case POLL_DEREGISTER: 1767 /* enable interrupts */ 1768 CSR_WRITE_2(sc, RE_IMR, sc->re_intrs); 1769 break; 1770 default: 1771 sc->rxcycles = count; 1772 re_rxeof(sc); 1773 re_txeof(sc); 1774 1775 if (!ifq_is_empty(&ifp->if_snd)) 1776 if_devstart(ifp); 1777 1778 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1779 uint16_t status; 1780 1781 status = CSR_READ_2(sc, RE_ISR); 1782 if (status == 0xffff) 1783 return; 1784 if (status) 1785 CSR_WRITE_2(sc, RE_ISR, status); 1786 1787 /* 1788 * XXX check behaviour on receiver stalls. 1789 */ 1790 1791 if (status & RE_ISR_SYSTEM_ERR) { 1792 re_reset(sc); 1793 re_init(sc); 1794 } 1795 } 1796 break; 1797 } 1798 } 1799 #endif /* DEVICE_POLLING */ 1800 1801 static void 1802 re_intr(void *arg) 1803 { 1804 struct re_softc *sc = arg; 1805 struct ifnet *ifp = &sc->arpcom.ac_if; 1806 uint16_t status; 1807 1808 if (sc->suspended || (ifp->if_flags & IFF_UP) == 0) 1809 return; 1810 1811 for (;;) { 1812 status = CSR_READ_2(sc, RE_ISR); 1813 /* If the card has gone away the read returns 0xffff. */ 1814 if (status == 0xffff) 1815 break; 1816 if (status) 1817 CSR_WRITE_2(sc, RE_ISR, status); 1818 1819 if ((status & sc->re_intrs) == 0) 1820 break; 1821 1822 if (status & (RE_ISR_RX_OK | RE_ISR_RX_ERR | RE_ISR_FIFO_OFLOW)) 1823 re_rxeof(sc); 1824 1825 if ((status & sc->re_tx_ack) || 1826 (status & RE_ISR_TX_ERR) || 1827 (status & RE_ISR_TX_DESC_UNAVAIL)) 1828 re_txeof(sc); 1829 1830 if (status & RE_ISR_SYSTEM_ERR) { 1831 re_reset(sc); 1832 re_init(sc); 1833 } 1834 1835 if (status & RE_ISR_LINKCHG) { 1836 callout_stop(&sc->re_timer); 1837 re_tick_serialized(sc); 1838 } 1839 } 1840 1841 if (!ifq_is_empty(&ifp->if_snd)) 1842 if_devstart(ifp); 1843 } 1844 1845 static int 1846 re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx, int *called_defrag) 1847 { 1848 struct ifnet *ifp = &sc->arpcom.ac_if; 1849 struct mbuf *m, *m_new = NULL; 1850 struct re_dmaload_arg arg; 1851 bus_dmamap_t map; 1852 int error; 1853 1854 KASSERT(sc->re_ldata.re_tx_free > 4, ("not enough free TX desc\n")); 1855 1856 *called_defrag = 0; 1857 m = *m_head; 1858 1859 /* 1860 * Set up checksum offload. Note: checksum offload bits must 1861 * appear in all descriptors of a multi-descriptor transmit 1862 * attempt. (This is according to testing done with an 8169 1863 * chip. I'm not sure if this is a requirement or a bug.) 1864 */ 1865 1866 arg.re_flags = 0; 1867 1868 if (m->m_pkthdr.csum_flags & CSUM_IP) 1869 arg.re_flags |= RE_TDESC_CMD_IPCSUM; 1870 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1871 arg.re_flags |= RE_TDESC_CMD_TCPCSUM; 1872 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1873 arg.re_flags |= RE_TDESC_CMD_UDPCSUM; 1874 1875 arg.sc = sc; 1876 arg.re_idx = *idx; 1877 arg.re_maxsegs = sc->re_ldata.re_tx_free; 1878 if (arg.re_maxsegs > 4) 1879 arg.re_maxsegs -= 4; 1880 arg.re_ring = sc->re_ldata.re_tx_list; 1881 1882 map = sc->re_ldata.re_tx_dmamap[*idx]; 1883 1884 /* 1885 * With some of the RealTek chips, using the checksum offload 1886 * support in conjunction with the autopadding feature results 1887 * in the transmission of corrupt frames. For example, if we 1888 * need to send a really small IP fragment that's less than 60 1889 * bytes in size, and IP header checksumming is enabled, the 1890 * resulting ethernet frame that appears on the wire will 1891 * have garbled payload. To work around this, if TX checksum 1892 * offload is enabled, we always manually pad short frames out 1893 * to the minimum ethernet frame size. We do this by pretending 1894 * the mbuf chain has too many fragments so the coalescing code 1895 * below can assemble the packet into a single buffer that's 1896 * padded out to the mininum frame size. 1897 * 1898 * Note: this appears unnecessary for TCP, and doing it for TCP 1899 * with PCIe adapters seems to result in bad checksums. 1900 */ 1901 if (arg.re_flags && !(arg.re_flags & RE_TDESC_CMD_TCPCSUM) && 1902 m->m_pkthdr.len < RE_MIN_FRAMELEN) { 1903 error = EFBIG; 1904 } else { 1905 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map, 1906 m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); 1907 } 1908 1909 if (error && error != EFBIG) { 1910 if_printf(ifp, "can't map mbuf (error %d)\n", error); 1911 return(ENOBUFS); 1912 } 1913 1914 /* Too many segments to map, coalesce into a single mbuf */ 1915 1916 if (error || arg.re_maxsegs == 0) { 1917 m_new = m_defrag_nofree(m, MB_DONTWAIT); 1918 if (m_new == NULL) { 1919 return(1); 1920 } else { 1921 m = m_new; 1922 *m_head = m; 1923 } 1924 1925 /* 1926 * Manually pad short frames, and zero the pad space 1927 * to avoid leaking data. 1928 */ 1929 if (m_new->m_pkthdr.len < RE_MIN_FRAMELEN) { 1930 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, 1931 RE_MIN_FRAMELEN - m_new->m_pkthdr.len); 1932 m_new->m_pkthdr.len += RE_MIN_FRAMELEN - 1933 m_new->m_pkthdr.len; 1934 m_new->m_len = m_new->m_pkthdr.len; 1935 } 1936 1937 *called_defrag = 1; 1938 arg.sc = sc; 1939 arg.re_idx = *idx; 1940 arg.re_maxsegs = sc->re_ldata.re_tx_free; 1941 arg.re_ring = sc->re_ldata.re_tx_list; 1942 1943 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map, 1944 m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); 1945 if (error) { 1946 m_freem(m); 1947 if_printf(ifp, "can't map mbuf (error %d)\n", error); 1948 return(EFBIG); 1949 } 1950 } 1951 1952 /* 1953 * Insure that the map for this transmission 1954 * is placed at the array index of the last descriptor 1955 * in this chain. 1956 */ 1957 sc->re_ldata.re_tx_dmamap[*idx] = 1958 sc->re_ldata.re_tx_dmamap[arg.re_idx]; 1959 sc->re_ldata.re_tx_dmamap[arg.re_idx] = map; 1960 1961 sc->re_ldata.re_tx_mbuf[arg.re_idx] = m; 1962 sc->re_ldata.re_tx_free -= arg.re_maxsegs; 1963 1964 /* 1965 * Set up hardware VLAN tagging. Note: vlan tag info must 1966 * appear in the first descriptor of a multi-descriptor 1967 * transmission attempt. 1968 */ 1969 1970 if (m->m_flags & M_VLANTAG) { 1971 sc->re_ldata.re_tx_list[*idx].re_vlanctl = 1972 htole32(htobe16(m->m_pkthdr.ether_vlantag) | 1973 RE_TDESC_VLANCTL_TAG); 1974 } 1975 1976 /* Transfer ownership of packet to the chip. */ 1977 1978 sc->re_ldata.re_tx_list[arg.re_idx].re_cmdstat |= 1979 htole32(RE_TDESC_CMD_OWN); 1980 if (*idx != arg.re_idx) 1981 sc->re_ldata.re_tx_list[*idx].re_cmdstat |= 1982 htole32(RE_TDESC_CMD_OWN); 1983 1984 RE_DESC_INC(arg.re_idx); 1985 *idx = arg.re_idx; 1986 1987 return(0); 1988 } 1989 1990 /* 1991 * Main transmit routine for C+ and gigE NICs. 1992 */ 1993 1994 static void 1995 re_start(struct ifnet *ifp) 1996 { 1997 struct re_softc *sc = ifp->if_softc; 1998 struct mbuf *m_head; 1999 struct mbuf *m_head2; 2000 int called_defrag, idx, need_trans; 2001 2002 if (!sc->re_link) { 2003 ifq_purge(&ifp->if_snd); 2004 return; 2005 } 2006 2007 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) 2008 return; 2009 2010 idx = sc->re_ldata.re_tx_prodidx; 2011 2012 need_trans = 0; 2013 while (sc->re_ldata.re_tx_mbuf[idx] == NULL) { 2014 if (sc->re_ldata.re_tx_free <= 4) { 2015 ifp->if_flags |= IFF_OACTIVE; 2016 break; 2017 } 2018 2019 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2020 if (m_head == NULL) 2021 break; 2022 2023 m_head2 = m_head; 2024 if (re_encap(sc, &m_head2, &idx, &called_defrag)) { 2025 /* 2026 * If we could not encapsulate the defragged packet, 2027 * the returned m_head2 is garbage and we must dequeue 2028 * and throw away the original packet. 2029 */ 2030 if (called_defrag) 2031 m_freem(m_head); 2032 ifp->if_flags |= IFF_OACTIVE; 2033 break; 2034 } 2035 2036 /* 2037 * Clean out the packet we encapsulated. If we defragged 2038 * the packet the m_head2 is the one that got encapsulated 2039 * and the original must be thrown away. Otherwise m_head2 2040 * *IS* the original. 2041 */ 2042 if (called_defrag) 2043 m_freem(m_head); 2044 need_trans = 1; 2045 2046 /* 2047 * If there's a BPF listener, bounce a copy of this frame 2048 * to him. 2049 */ 2050 ETHER_BPF_MTAP(ifp, m_head2); 2051 } 2052 2053 if (!need_trans) { 2054 if (RE_TX_MODERATION_IS_ENABLED(sc) && 2055 sc->re_ldata.re_tx_free != RE_TX_DESC_CNT) 2056 CSR_WRITE_4(sc, RE_TIMERCNT, 1); 2057 return; 2058 } 2059 2060 /* Flush the TX descriptors */ 2061 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 2062 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE); 2063 2064 sc->re_ldata.re_tx_prodidx = idx; 2065 2066 /* 2067 * RealTek put the TX poll request register in a different 2068 * location on the 8169 gigE chip. I don't know why. 2069 */ 2070 CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START); 2071 2072 if (RE_TX_MODERATION_IS_ENABLED(sc)) { 2073 /* 2074 * Use the countdown timer for interrupt moderation. 2075 * 'TX done' interrupts are disabled. Instead, we reset the 2076 * countdown timer, which will begin counting until it hits 2077 * the value in the TIMERINT register, and then trigger an 2078 * interrupt. Each time we write to the TIMERCNT register, 2079 * the timer count is reset to 0. 2080 */ 2081 CSR_WRITE_4(sc, RE_TIMERCNT, 1); 2082 } 2083 2084 /* 2085 * Set a timeout in case the chip goes out to lunch. 2086 */ 2087 ifp->if_timer = 5; 2088 } 2089 2090 static void 2091 re_init(void *xsc) 2092 { 2093 struct re_softc *sc = xsc; 2094 struct ifnet *ifp = &sc->arpcom.ac_if; 2095 struct mii_data *mii; 2096 uint32_t rxcfg = 0; 2097 2098 mii = device_get_softc(sc->re_miibus); 2099 2100 /* 2101 * Cancel pending I/O and free all RX/TX buffers. 2102 */ 2103 re_stop(sc); 2104 2105 /* 2106 * Enable C+ RX and TX mode, as well as VLAN stripping and 2107 * RX checksum offload. We must configure the C+ register 2108 * before all others. 2109 */ 2110 CSR_WRITE_2(sc, RE_CPLUS_CMD, RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB | 2111 RE_CPLUSCMD_PCI_MRW | RE_CPLUSCMD_VLANSTRIP | 2112 (ifp->if_capenable & IFCAP_RXCSUM ? 2113 RE_CPLUSCMD_RXCSUM_ENB : 0)); 2114 2115 /* 2116 * Init our MAC address. Even though the chipset 2117 * documentation doesn't mention it, we need to enter "Config 2118 * register write enable" mode to modify the ID registers. 2119 */ 2120 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_WRITECFG); 2121 CSR_WRITE_4(sc, RE_IDR0, 2122 htole32(*(uint32_t *)(&sc->arpcom.ac_enaddr[0]))); 2123 CSR_WRITE_2(sc, RE_IDR4, 2124 htole16(*(uint16_t *)(&sc->arpcom.ac_enaddr[4]))); 2125 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF); 2126 2127 /* 2128 * For C+ mode, initialize the RX descriptors and mbufs. 2129 */ 2130 re_rx_list_init(sc); 2131 re_tx_list_init(sc); 2132 2133 /* 2134 * Load the addresses of the RX and TX lists into the chip. 2135 */ 2136 CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI, 2137 RE_ADDR_HI(sc->re_ldata.re_rx_list_addr)); 2138 CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO, 2139 RE_ADDR_LO(sc->re_ldata.re_rx_list_addr)); 2140 2141 CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI, 2142 RE_ADDR_HI(sc->re_ldata.re_tx_list_addr)); 2143 CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO, 2144 RE_ADDR_LO(sc->re_ldata.re_tx_list_addr)); 2145 2146 /* 2147 * Enable transmit and receive. 2148 */ 2149 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB); 2150 2151 /* 2152 * Set the initial TX and RX configuration. 2153 */ 2154 if (sc->re_testmode) { 2155 if (sc->re_type == RE_8169) 2156 CSR_WRITE_4(sc, RE_TXCFG, 2157 RE_TXCFG_CONFIG | RE_LOOPTEST_ON); 2158 else 2159 CSR_WRITE_4(sc, RE_TXCFG, 2160 RE_TXCFG_CONFIG | RE_LOOPTEST_ON_CPLUS); 2161 } else 2162 CSR_WRITE_4(sc, RE_TXCFG, RE_TXCFG_CONFIG); 2163 2164 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, 16); 2165 2166 CSR_WRITE_4(sc, RE_RXCFG, RE_RXCFG_CONFIG); 2167 2168 /* Set the individual bit to receive frames for this host only. */ 2169 rxcfg = CSR_READ_4(sc, RE_RXCFG); 2170 rxcfg |= RE_RXCFG_RX_INDIV; 2171 2172 /* If we want promiscuous mode, set the allframes bit. */ 2173 if (ifp->if_flags & IFF_PROMISC) { 2174 rxcfg |= RE_RXCFG_RX_ALLPHYS; 2175 CSR_WRITE_4(sc, RE_RXCFG, rxcfg); 2176 } else { 2177 rxcfg &= ~RE_RXCFG_RX_ALLPHYS; 2178 CSR_WRITE_4(sc, RE_RXCFG, rxcfg); 2179 } 2180 2181 /* 2182 * Set capture broadcast bit to capture broadcast frames. 2183 */ 2184 if (ifp->if_flags & IFF_BROADCAST) { 2185 rxcfg |= RE_RXCFG_RX_BROAD; 2186 CSR_WRITE_4(sc, RE_RXCFG, rxcfg); 2187 } else { 2188 rxcfg &= ~RE_RXCFG_RX_BROAD; 2189 CSR_WRITE_4(sc, RE_RXCFG, rxcfg); 2190 } 2191 2192 /* 2193 * Program the multicast filter, if necessary. 2194 */ 2195 re_setmulti(sc); 2196 2197 #ifdef DEVICE_POLLING 2198 /* 2199 * Disable interrupts if we are polling. 2200 */ 2201 if (ifp->if_flags & IFF_POLLING) 2202 CSR_WRITE_2(sc, RE_IMR, 0); 2203 else /* otherwise ... */ 2204 #endif /* DEVICE_POLLING */ 2205 /* 2206 * Enable interrupts. 2207 */ 2208 if (sc->re_testmode) 2209 CSR_WRITE_2(sc, RE_IMR, 0); 2210 else 2211 CSR_WRITE_2(sc, RE_IMR, sc->re_intrs); 2212 CSR_WRITE_2(sc, RE_ISR, sc->re_intrs); 2213 2214 /* Set initial TX threshold */ 2215 sc->re_txthresh = RE_TX_THRESH_INIT; 2216 2217 /* Start RX/TX process. */ 2218 if (sc->re_flags & RE_F_HASMPC) 2219 CSR_WRITE_4(sc, RE_MISSEDPKT, 0); 2220 #ifdef notdef 2221 /* Enable receiver and transmitter. */ 2222 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB); 2223 #endif 2224 2225 if (RE_TX_MODERATION_IS_ENABLED(sc)) { 2226 /* 2227 * Initialize the timer interrupt register so that 2228 * a timer interrupt will be generated once the timer 2229 * reaches a certain number of ticks. The timer is 2230 * reloaded on each transmit. This gives us TX interrupt 2231 * moderation, which dramatically improves TX frame rate. 2232 */ 2233 if (sc->re_type == RE_8169) 2234 CSR_WRITE_4(sc, RE_TIMERINT_8169, 0x800); 2235 else 2236 CSR_WRITE_4(sc, RE_TIMERINT, 0x400); 2237 } 2238 2239 /* 2240 * For 8169 gigE NICs, set the max allowed RX packet 2241 * size so we can receive jumbo frames. 2242 */ 2243 if (sc->re_type == RE_8169) 2244 CSR_WRITE_2(sc, RE_MAXRXPKTLEN, 16383); 2245 2246 if (sc->re_testmode) { 2247 return; 2248 } 2249 2250 mii_mediachg(mii); 2251 2252 CSR_WRITE_1(sc, RE_CFG1, RE_CFG1_DRVLOAD|RE_CFG1_FULLDUPLEX); 2253 2254 ifp->if_flags |= IFF_RUNNING; 2255 ifp->if_flags &= ~IFF_OACTIVE; 2256 2257 sc->re_link = 0; 2258 callout_reset(&sc->re_timer, hz, re_tick, sc); 2259 } 2260 2261 /* 2262 * Set media options. 2263 */ 2264 static int 2265 re_ifmedia_upd(struct ifnet *ifp) 2266 { 2267 struct re_softc *sc = ifp->if_softc; 2268 struct mii_data *mii; 2269 2270 mii = device_get_softc(sc->re_miibus); 2271 mii_mediachg(mii); 2272 2273 return(0); 2274 } 2275 2276 /* 2277 * Report current media status. 2278 */ 2279 static void 2280 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2281 { 2282 struct re_softc *sc = ifp->if_softc; 2283 struct mii_data *mii; 2284 2285 mii = device_get_softc(sc->re_miibus); 2286 2287 mii_pollstat(mii); 2288 ifmr->ifm_active = mii->mii_media_active; 2289 ifmr->ifm_status = mii->mii_media_status; 2290 } 2291 2292 static int 2293 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2294 { 2295 struct re_softc *sc = ifp->if_softc; 2296 struct ifreq *ifr = (struct ifreq *) data; 2297 struct mii_data *mii; 2298 int error = 0; 2299 2300 switch(command) { 2301 case SIOCSIFMTU: 2302 if (ifr->ifr_mtu > RE_JUMBO_MTU) 2303 error = EINVAL; 2304 ifp->if_mtu = ifr->ifr_mtu; 2305 break; 2306 case SIOCSIFFLAGS: 2307 if (ifp->if_flags & IFF_UP) 2308 re_init(sc); 2309 else if (ifp->if_flags & IFF_RUNNING) 2310 re_stop(sc); 2311 break; 2312 case SIOCADDMULTI: 2313 case SIOCDELMULTI: 2314 re_setmulti(sc); 2315 error = 0; 2316 break; 2317 case SIOCGIFMEDIA: 2318 case SIOCSIFMEDIA: 2319 mii = device_get_softc(sc->re_miibus); 2320 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2321 break; 2322 case SIOCSIFCAP: 2323 ifp->if_capenable &= ~(IFCAP_HWCSUM); 2324 ifp->if_capenable |= 2325 ifr->ifr_reqcap & (IFCAP_HWCSUM); 2326 if (ifp->if_capenable & IFCAP_TXCSUM) 2327 ifp->if_hwassist = RE_CSUM_FEATURES; 2328 else 2329 ifp->if_hwassist = 0; 2330 if (ifp->if_flags & IFF_RUNNING) 2331 re_init(sc); 2332 break; 2333 default: 2334 error = ether_ioctl(ifp, command, data); 2335 break; 2336 } 2337 return(error); 2338 } 2339 2340 static void 2341 re_watchdog(struct ifnet *ifp) 2342 { 2343 struct re_softc *sc = ifp->if_softc; 2344 2345 if_printf(ifp, "watchdog timeout\n"); 2346 2347 ifp->if_oerrors++; 2348 2349 re_txeof(sc); 2350 re_rxeof(sc); 2351 2352 re_init(sc); 2353 2354 if (!ifq_is_empty(&ifp->if_snd)) 2355 if_devstart(ifp); 2356 } 2357 2358 /* 2359 * Stop the adapter and free any mbufs allocated to the 2360 * RX and TX lists. 2361 */ 2362 static void 2363 re_stop(struct re_softc *sc) 2364 { 2365 struct ifnet *ifp = &sc->arpcom.ac_if; 2366 int i; 2367 2368 ifp->if_timer = 0; 2369 callout_stop(&sc->re_timer); 2370 2371 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2372 2373 CSR_WRITE_1(sc, RE_COMMAND, 0x00); 2374 CSR_WRITE_2(sc, RE_IMR, 0x0000); 2375 CSR_WRITE_2(sc, RE_ISR, 0xFFFF); 2376 2377 if (sc->re_head != NULL) { 2378 m_freem(sc->re_head); 2379 sc->re_head = sc->re_tail = NULL; 2380 } 2381 2382 /* Free the TX list buffers. */ 2383 for (i = 0; i < RE_TX_DESC_CNT; i++) { 2384 if (sc->re_ldata.re_tx_mbuf[i] != NULL) { 2385 bus_dmamap_unload(sc->re_ldata.re_mtag, 2386 sc->re_ldata.re_tx_dmamap[i]); 2387 m_freem(sc->re_ldata.re_tx_mbuf[i]); 2388 sc->re_ldata.re_tx_mbuf[i] = NULL; 2389 } 2390 } 2391 2392 /* Free the RX list buffers. */ 2393 for (i = 0; i < RE_RX_DESC_CNT; i++) { 2394 if (sc->re_ldata.re_rx_mbuf[i] != NULL) { 2395 bus_dmamap_unload(sc->re_ldata.re_mtag, 2396 sc->re_ldata.re_rx_dmamap[i]); 2397 m_freem(sc->re_ldata.re_rx_mbuf[i]); 2398 sc->re_ldata.re_rx_mbuf[i] = NULL; 2399 } 2400 } 2401 } 2402 2403 /* 2404 * Device suspend routine. Stop the interface and save some PCI 2405 * settings in case the BIOS doesn't restore them properly on 2406 * resume. 2407 */ 2408 static int 2409 re_suspend(device_t dev) 2410 { 2411 #ifndef BURN_BRIDGES 2412 int i; 2413 #endif 2414 struct re_softc *sc = device_get_softc(dev); 2415 2416 re_stop(sc); 2417 2418 #ifndef BURN_BRIDGES 2419 for (i = 0; i < 5; i++) 2420 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2421 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2422 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2423 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2424 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2425 #endif 2426 2427 sc->suspended = 1; 2428 2429 return (0); 2430 } 2431 2432 /* 2433 * Device resume routine. Restore some PCI settings in case the BIOS 2434 * doesn't, re-enable busmastering, and restart the interface if 2435 * appropriate. 2436 */ 2437 static int 2438 re_resume(device_t dev) 2439 { 2440 struct re_softc *sc = device_get_softc(dev); 2441 struct ifnet *ifp = &sc->arpcom.ac_if; 2442 #ifndef BURN_BRIDGES 2443 int i; 2444 #endif 2445 2446 #ifndef BURN_BRIDGES 2447 /* better way to do this? */ 2448 for (i = 0; i < 5; i++) 2449 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 2450 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 2451 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 2452 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 2453 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 2454 2455 /* reenable busmastering */ 2456 pci_enable_busmaster(dev); 2457 pci_enable_io(dev, SYS_RES_IOPORT); 2458 #endif 2459 2460 /* reinitialize interface if necessary */ 2461 if (ifp->if_flags & IFF_UP) 2462 re_init(sc); 2463 2464 sc->suspended = 0; 2465 2466 return (0); 2467 } 2468 2469 /* 2470 * Stop all chip I/O so that the kernel's probe routines don't 2471 * get confused by errant DMAs when rebooting. 2472 */ 2473 static void 2474 re_shutdown(device_t dev) 2475 { 2476 struct re_softc *sc = device_get_softc(dev); 2477 struct ifnet *ifp = &sc->arpcom.ac_if; 2478 2479 lwkt_serialize_enter(ifp->if_serializer); 2480 re_stop(sc); 2481 lwkt_serialize_exit(ifp->if_serializer); 2482 } 2483 2484 static int 2485 re_sysctl_tx_moderation(SYSCTL_HANDLER_ARGS) 2486 { 2487 struct re_softc *sc = arg1; 2488 struct ifnet *ifp = &sc->arpcom.ac_if; 2489 int error = 0, mod, mod_old; 2490 2491 lwkt_serialize_enter(ifp->if_serializer); 2492 2493 mod_old = mod = RE_TX_MODERATION_IS_ENABLED(sc); 2494 2495 error = sysctl_handle_int(oidp, &mod, 0, req); 2496 if (error || req->newptr == NULL || mod == mod_old) 2497 goto back; 2498 if (mod != 0 && mod != 1) { 2499 error = EINVAL; 2500 goto back; 2501 } 2502 2503 if (mod) 2504 RE_ENABLE_TX_MODERATION(sc); 2505 else 2506 RE_DISABLE_TX_MODERATION(sc); 2507 2508 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == (IFF_RUNNING | IFF_UP)) 2509 re_init(sc); 2510 back: 2511 lwkt_serialize_exit(ifp->if_serializer); 2512 return error; 2513 } 2514