1 /* 2 * Copyright (c) 2004 3 * Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 4 * 5 * Copyright (c) 1997, 1998-2003 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $ 36 * $DragonFly: src/sys/dev/netif/re/if_re.c,v 1.99 2008/10/30 11:27:40 sephe Exp $ 37 */ 38 39 /* 40 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 41 * 42 * Written by Bill Paul <wpaul@windriver.com> 43 * Senior Networking Software Engineer 44 * Wind River Systems 45 */ 46 47 /* 48 * This driver is designed to support RealTek's next generation of 49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 50 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 51 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 52 * 53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 54 * with the older 8139 family, however it also supports a special 55 * C+ mode of operation that provides several new performance enhancing 56 * features. These include: 57 * 58 * o Descriptor based DMA mechanism. Each descriptor represents 59 * a single packet fragment. Data buffers may be aligned on 60 * any byte boundary. 61 * 62 * o 64-bit DMA 63 * 64 * o TCP/IP checksum offload for both RX and TX 65 * 66 * o High and normal priority transmit DMA rings 67 * 68 * o VLAN tag insertion and extraction 69 * 70 * o TCP large send (segmentation offload) 71 * 72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 73 * programming API is fairly straightforward. The RX filtering, EEPROM 74 * access and PHY access is the same as it is on the older 8139 series 75 * chips. 76 * 77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 78 * same programming API and feature set as the 8139C+ with the following 79 * differences and additions: 80 * 81 * o 1000Mbps mode 82 * 83 * o Jumbo frames 84 * 85 * o GMII and TBI ports/registers for interfacing with copper 86 * or fiber PHYs 87 * 88 * o RX and TX DMA rings can have up to 1024 descriptors 89 * (the 8139C+ allows a maximum of 64) 90 * 91 * o Slight differences in register layout from the 8139C+ 92 * 93 * The TX start and timer interrupt registers are at different locations 94 * on the 8169 than they are on the 8139C+. Also, the status word in the 95 * RX descriptor has a slightly different bit layout. The 8169 does not 96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 97 * copper gigE PHY. 98 * 99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 100 * (the 'S' stands for 'single-chip'). These devices have the same 101 * programming API as the older 8169, but also have some vendor-specific 102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 104 * 105 * This driver takes advantage of the RX and TX checksum offload and 106 * VLAN tag insertion/extraction features. It also implements TX 107 * interrupt moderation using the timer interrupt registers, which 108 * significantly reduces TX interrupt load. There is also support 109 * for jumbo frames, however the 8169/8169S/8110S can not transmit 110 * jumbo frames larger than 7440, so the max MTU possible with this 111 * driver is 7422 bytes. 112 */ 113 114 #define _IP_VHL 115 116 #include "opt_polling.h" 117 118 #include <sys/param.h> 119 #include <sys/bus.h> 120 #include <sys/endian.h> 121 #include <sys/kernel.h> 122 #include <sys/in_cksum.h> 123 #include <sys/interrupt.h> 124 #include <sys/malloc.h> 125 #include <sys/mbuf.h> 126 #include <sys/rman.h> 127 #include <sys/serialize.h> 128 #include <sys/socket.h> 129 #include <sys/sockio.h> 130 #include <sys/sysctl.h> 131 132 #include <net/bpf.h> 133 #include <net/ethernet.h> 134 #include <net/if.h> 135 #include <net/ifq_var.h> 136 #include <net/if_arp.h> 137 #include <net/if_dl.h> 138 #include <net/if_media.h> 139 #include <net/if_types.h> 140 #include <net/vlan/if_vlan_var.h> 141 #include <net/vlan/if_vlan_ether.h> 142 143 #include <netinet/ip.h> 144 145 #include <dev/netif/mii_layer/mii.h> 146 #include <dev/netif/mii_layer/miivar.h> 147 148 #include <bus/pci/pcidevs.h> 149 #include <bus/pci/pcireg.h> 150 #include <bus/pci/pcivar.h> 151 152 /* "device miibus" required. See GENERIC if you get errors here. */ 153 #include "miibus_if.h" 154 155 #include <dev/netif/re/if_rereg.h> 156 #include <dev/netif/re/if_revar.h> 157 158 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 159 160 /* 161 * Various supported device vendors/types and their names. 162 */ 163 static const struct re_type { 164 uint16_t re_vid; 165 uint16_t re_did; 166 const char *re_name; 167 } re_devs[] = { 168 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T, 169 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 170 171 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8139, 172 "RealTek 8139C+ 10/100BaseTX" }, 173 174 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E, 175 "RealTek 810x PCIe 10/100baseTX" }, 176 177 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168, 178 "RealTek 8111/8168 PCIe Gigabit Ethernet" }, 179 180 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, 181 "RealTek 8110/8169 Gigabit Ethernet" }, 182 183 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC, 184 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 185 186 { PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT, 187 "Corega CG-LAPCIGT Gigabit Ethernet" }, 188 189 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032, 190 "Linksys EG1032 Gigabit Ethernet" }, 191 192 { PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902, 193 "US Robotics 997902 Gigabit Ethernet" }, 194 195 { 0, 0, NULL } 196 }; 197 198 static const struct re_hwrev re_hwrevs[] = { 199 { RE_HWREV_8139CPLUS, RE_MACVER_UNKN, ETHERMTU, 200 RE_C_HWCSUM | RE_C_8139CP }, 201 202 { RE_HWREV_8169, RE_MACVER_UNKN, RE_MTU_6K, 203 RE_C_HWCSUM | RE_C_8169 }, 204 205 { RE_HWREV_8110S, RE_MACVER_03, RE_MTU_6K, 206 RE_C_HWCSUM | RE_C_8169 }, 207 208 { RE_HWREV_8169S, RE_MACVER_03, RE_MTU_6K, 209 RE_C_HWCSUM | RE_C_8169 }, 210 211 { RE_HWREV_8169SB, RE_MACVER_04, RE_MTU_6K, 212 RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 }, 213 214 { RE_HWREV_8169SC1, RE_MACVER_05, RE_MTU_6K, 215 RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 }, 216 217 { RE_HWREV_8169SC2, RE_MACVER_06, RE_MTU_6K, 218 RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 }, 219 220 { RE_HWREV_8168B1, RE_MACVER_21, RE_MTU_6K, 221 RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT }, 222 223 { RE_HWREV_8168B2, RE_MACVER_23, RE_MTU_6K, 224 RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_AUTOPAD }, 225 226 { RE_HWREV_8168B3, RE_MACVER_23, RE_MTU_6K, 227 RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_AUTOPAD }, 228 229 { RE_HWREV_8168C, RE_MACVER_29, RE_MTU_6K, 230 RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT | 231 RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX }, 232 233 { RE_HWREV_8168CP, RE_MACVER_2B, RE_MTU_6K, 234 RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT | 235 RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX }, 236 237 { RE_HWREV_8168D, RE_MACVER_2A, RE_MTU_9K, 238 RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT | 239 RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX }, 240 241 { RE_HWREV_8100E, RE_MACVER_UNKN, ETHERMTU, 242 RE_C_HWCSUM }, 243 244 { RE_HWREV_8101E1, RE_MACVER_16, ETHERMTU, 245 RE_C_HWCSUM }, 246 247 { RE_HWREV_8101E2, RE_MACVER_16, ETHERMTU, 248 RE_C_HWCSUM }, 249 250 { RE_HWREV_8102E, RE_MACVER_15, ETHERMTU, 251 RE_C_HWCSUM | RE_C_MAC2 | RE_C_AUTOPAD | RE_C_STOP_RXTX }, 252 253 { RE_HWREV_8102EL, RE_MACVER_15, ETHERMTU, 254 RE_C_HWCSUM | RE_C_MAC2 | RE_C_AUTOPAD | RE_C_STOP_RXTX }, 255 256 { RE_HWREV_NULL, 0, 0, 0 } 257 }; 258 259 static int re_probe(device_t); 260 static int re_attach(device_t); 261 static int re_detach(device_t); 262 static int re_suspend(device_t); 263 static int re_resume(device_t); 264 static void re_shutdown(device_t); 265 266 static void re_dma_map_addr(void *, bus_dma_segment_t *, int, int); 267 static void re_dma_map_desc(void *, bus_dma_segment_t *, int, 268 bus_size_t, int); 269 static int re_allocmem(device_t); 270 static void re_freemem(device_t); 271 static void re_freebufmem(struct re_softc *, int, int); 272 static int re_encap(struct re_softc *, struct mbuf **, int *); 273 static int re_newbuf_std(struct re_softc *, int, int); 274 static int re_newbuf_jumbo(struct re_softc *, int, int); 275 static void re_setup_rxdesc(struct re_softc *, int); 276 static int re_rx_list_init(struct re_softc *); 277 static int re_tx_list_init(struct re_softc *); 278 static int re_rxeof(struct re_softc *); 279 static int re_txeof(struct re_softc *); 280 static int re_tx_collect(struct re_softc *); 281 static void re_intr(void *); 282 static void re_tick(void *); 283 static void re_tick_serialized(void *); 284 285 static void re_start(struct ifnet *); 286 static int re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 287 static void re_init(void *); 288 static void re_stop(struct re_softc *); 289 static void re_watchdog(struct ifnet *); 290 static int re_ifmedia_upd(struct ifnet *); 291 static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 292 293 static void re_eeprom_putbyte(struct re_softc *, int); 294 static void re_eeprom_getword(struct re_softc *, int, u_int16_t *); 295 static void re_read_eeprom(struct re_softc *, caddr_t, int, int); 296 static void re_get_eewidth(struct re_softc *); 297 298 static int re_gmii_readreg(device_t, int, int); 299 static int re_gmii_writereg(device_t, int, int, int); 300 301 static int re_miibus_readreg(device_t, int, int); 302 static int re_miibus_writereg(device_t, int, int, int); 303 static void re_miibus_statchg(device_t); 304 305 static void re_setmulti(struct re_softc *); 306 static void re_reset(struct re_softc *, int); 307 static void re_get_eaddr(struct re_softc *, uint8_t *); 308 309 static void re_setup_hw_im(struct re_softc *); 310 static void re_setup_sim_im(struct re_softc *); 311 static void re_disable_hw_im(struct re_softc *); 312 static void re_disable_sim_im(struct re_softc *); 313 static void re_config_imtype(struct re_softc *, int); 314 static void re_setup_intr(struct re_softc *, int, int); 315 316 static int re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *); 317 static int re_sysctl_rxtime(SYSCTL_HANDLER_ARGS); 318 static int re_sysctl_txtime(SYSCTL_HANDLER_ARGS); 319 static int re_sysctl_simtime(SYSCTL_HANDLER_ARGS); 320 static int re_sysctl_imtype(SYSCTL_HANDLER_ARGS); 321 322 static int re_jpool_alloc(struct re_softc *); 323 static void re_jpool_free(struct re_softc *); 324 static struct re_jbuf *re_jbuf_alloc(struct re_softc *); 325 static void re_jbuf_free(void *); 326 static void re_jbuf_ref(void *); 327 328 #ifdef RE_DIAG 329 static int re_diag(struct re_softc *); 330 #endif 331 332 #ifdef DEVICE_POLLING 333 static void re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 334 #endif 335 336 static device_method_t re_methods[] = { 337 /* Device interface */ 338 DEVMETHOD(device_probe, re_probe), 339 DEVMETHOD(device_attach, re_attach), 340 DEVMETHOD(device_detach, re_detach), 341 DEVMETHOD(device_suspend, re_suspend), 342 DEVMETHOD(device_resume, re_resume), 343 DEVMETHOD(device_shutdown, re_shutdown), 344 345 /* bus interface */ 346 DEVMETHOD(bus_print_child, bus_generic_print_child), 347 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 348 349 /* MII interface */ 350 DEVMETHOD(miibus_readreg, re_miibus_readreg), 351 DEVMETHOD(miibus_writereg, re_miibus_writereg), 352 DEVMETHOD(miibus_statchg, re_miibus_statchg), 353 354 { 0, 0 } 355 }; 356 357 static driver_t re_driver = { 358 "re", 359 re_methods, 360 sizeof(struct re_softc) 361 }; 362 363 static devclass_t re_devclass; 364 365 DECLARE_DUMMY_MODULE(if_re); 366 DRIVER_MODULE(if_re, pci, re_driver, re_devclass, 0, 0); 367 DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, 0, 0); 368 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 369 370 static int re_rx_desc_count = RE_RX_DESC_CNT_DEF; 371 static int re_tx_desc_count = RE_TX_DESC_CNT_DEF; 372 373 TUNABLE_INT("hw.re.rx_desc_count", &re_rx_desc_count); 374 TUNABLE_INT("hw.re.tx_desc_count", &re_tx_desc_count); 375 376 #define EE_SET(x) \ 377 CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) | (x)) 378 379 #define EE_CLR(x) \ 380 CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) & ~(x)) 381 382 static __inline void 383 re_free_rxchain(struct re_softc *sc) 384 { 385 if (sc->re_head != NULL) { 386 m_freem(sc->re_head); 387 sc->re_head = sc->re_tail = NULL; 388 } 389 } 390 391 /* 392 * Send a read command and address to the EEPROM, check for ACK. 393 */ 394 static void 395 re_eeprom_putbyte(struct re_softc *sc, int addr) 396 { 397 int d, i; 398 399 d = addr | (RE_9346_READ << sc->re_eewidth); 400 401 /* 402 * Feed in each bit and strobe the clock. 403 */ 404 for (i = 1 << (sc->re_eewidth + 3); i; i >>= 1) { 405 if (d & i) 406 EE_SET(RE_EE_DATAIN); 407 else 408 EE_CLR(RE_EE_DATAIN); 409 DELAY(100); 410 EE_SET(RE_EE_CLK); 411 DELAY(150); 412 EE_CLR(RE_EE_CLK); 413 DELAY(100); 414 } 415 } 416 417 /* 418 * Read a word of data stored in the EEPROM at address 'addr.' 419 */ 420 static void 421 re_eeprom_getword(struct re_softc *sc, int addr, uint16_t *dest) 422 { 423 int i; 424 uint16_t word = 0; 425 426 /* 427 * Send address of word we want to read. 428 */ 429 re_eeprom_putbyte(sc, addr); 430 431 /* 432 * Start reading bits from EEPROM. 433 */ 434 for (i = 0x8000; i != 0; i >>= 1) { 435 EE_SET(RE_EE_CLK); 436 DELAY(100); 437 if (CSR_READ_1(sc, RE_EECMD) & RE_EE_DATAOUT) 438 word |= i; 439 EE_CLR(RE_EE_CLK); 440 DELAY(100); 441 } 442 443 *dest = word; 444 } 445 446 /* 447 * Read a sequence of words from the EEPROM. 448 */ 449 static void 450 re_read_eeprom(struct re_softc *sc, caddr_t dest, int off, int cnt) 451 { 452 int i; 453 uint16_t word = 0, *ptr; 454 455 CSR_SETBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM); 456 DELAY(100); 457 458 for (i = 0; i < cnt; i++) { 459 CSR_SETBIT_1(sc, RE_EECMD, RE_EE_SEL); 460 re_eeprom_getword(sc, off + i, &word); 461 CSR_CLRBIT_1(sc, RE_EECMD, RE_EE_SEL); 462 ptr = (uint16_t *)(dest + (i * 2)); 463 *ptr = word; 464 } 465 466 CSR_CLRBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM); 467 } 468 469 static void 470 re_get_eewidth(struct re_softc *sc) 471 { 472 uint16_t re_did = 0; 473 474 sc->re_eewidth = 6; 475 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 476 if (re_did != 0x8129) 477 sc->re_eewidth = 8; 478 } 479 480 static int 481 re_gmii_readreg(device_t dev, int phy, int reg) 482 { 483 struct re_softc *sc = device_get_softc(dev); 484 u_int32_t rval; 485 int i; 486 487 if (phy != 1) 488 return(0); 489 490 /* Let the rgephy driver read the GMEDIASTAT register */ 491 492 if (reg == RE_GMEDIASTAT) 493 return(CSR_READ_1(sc, RE_GMEDIASTAT)); 494 495 CSR_WRITE_4(sc, RE_PHYAR, reg << 16); 496 DELAY(1000); 497 498 for (i = 0; i < RE_TIMEOUT; i++) { 499 rval = CSR_READ_4(sc, RE_PHYAR); 500 if (rval & RE_PHYAR_BUSY) 501 break; 502 DELAY(100); 503 } 504 505 if (i == RE_TIMEOUT) { 506 device_printf(dev, "PHY read failed\n"); 507 return(0); 508 } 509 510 return(rval & RE_PHYAR_PHYDATA); 511 } 512 513 static int 514 re_gmii_writereg(device_t dev, int phy, int reg, int data) 515 { 516 struct re_softc *sc = device_get_softc(dev); 517 uint32_t rval; 518 int i; 519 520 CSR_WRITE_4(sc, RE_PHYAR, 521 (reg << 16) | (data & RE_PHYAR_PHYDATA) | RE_PHYAR_BUSY); 522 DELAY(1000); 523 524 for (i = 0; i < RE_TIMEOUT; i++) { 525 rval = CSR_READ_4(sc, RE_PHYAR); 526 if ((rval & RE_PHYAR_BUSY) == 0) 527 break; 528 DELAY(100); 529 } 530 531 if (i == RE_TIMEOUT) 532 device_printf(dev, "PHY write failed\n"); 533 534 return(0); 535 } 536 537 static int 538 re_miibus_readreg(device_t dev, int phy, int reg) 539 { 540 struct re_softc *sc = device_get_softc(dev); 541 uint16_t rval = 0; 542 uint16_t re8139_reg = 0; 543 544 if (!RE_IS_8139CP(sc)) { 545 rval = re_gmii_readreg(dev, phy, reg); 546 return(rval); 547 } 548 549 /* Pretend the internal PHY is only at address 0 */ 550 if (phy) 551 return(0); 552 553 switch(reg) { 554 case MII_BMCR: 555 re8139_reg = RE_BMCR; 556 break; 557 case MII_BMSR: 558 re8139_reg = RE_BMSR; 559 break; 560 case MII_ANAR: 561 re8139_reg = RE_ANAR; 562 break; 563 case MII_ANER: 564 re8139_reg = RE_ANER; 565 break; 566 case MII_ANLPAR: 567 re8139_reg = RE_LPAR; 568 break; 569 case MII_PHYIDR1: 570 case MII_PHYIDR2: 571 return(0); 572 /* 573 * Allow the rlphy driver to read the media status 574 * register. If we have a link partner which does not 575 * support NWAY, this is the register which will tell 576 * us the results of parallel detection. 577 */ 578 case RE_MEDIASTAT: 579 return(CSR_READ_1(sc, RE_MEDIASTAT)); 580 default: 581 device_printf(dev, "bad phy register\n"); 582 return(0); 583 } 584 rval = CSR_READ_2(sc, re8139_reg); 585 if (re8139_reg == RE_BMCR) { 586 /* 8139C+ has different bit layout. */ 587 rval &= ~(BMCR_LOOP | BMCR_ISO); 588 } 589 return(rval); 590 } 591 592 static int 593 re_miibus_writereg(device_t dev, int phy, int reg, int data) 594 { 595 struct re_softc *sc= device_get_softc(dev); 596 u_int16_t re8139_reg = 0; 597 598 if (!RE_IS_8139CP(sc)) 599 return(re_gmii_writereg(dev, phy, reg, data)); 600 601 /* Pretend the internal PHY is only at address 0 */ 602 if (phy) 603 return(0); 604 605 switch(reg) { 606 case MII_BMCR: 607 re8139_reg = RE_BMCR; 608 /* 8139C+ has different bit layout. */ 609 data &= ~(BMCR_LOOP | BMCR_ISO); 610 break; 611 case MII_BMSR: 612 re8139_reg = RE_BMSR; 613 break; 614 case MII_ANAR: 615 re8139_reg = RE_ANAR; 616 break; 617 case MII_ANER: 618 re8139_reg = RE_ANER; 619 break; 620 case MII_ANLPAR: 621 re8139_reg = RE_LPAR; 622 break; 623 case MII_PHYIDR1: 624 case MII_PHYIDR2: 625 return(0); 626 default: 627 device_printf(dev, "bad phy register\n"); 628 return(0); 629 } 630 CSR_WRITE_2(sc, re8139_reg, data); 631 return(0); 632 } 633 634 static void 635 re_miibus_statchg(device_t dev) 636 { 637 } 638 639 /* 640 * Program the 64-bit multicast hash filter. 641 */ 642 static void 643 re_setmulti(struct re_softc *sc) 644 { 645 struct ifnet *ifp = &sc->arpcom.ac_if; 646 int h = 0; 647 uint32_t hashes[2] = { 0, 0 }; 648 struct ifmultiaddr *ifma; 649 uint32_t rxfilt; 650 int mcnt = 0; 651 652 rxfilt = CSR_READ_4(sc, RE_RXCFG); 653 654 /* Set the individual bit to receive frames for this host only. */ 655 rxfilt |= RE_RXCFG_RX_INDIV; 656 /* Set capture broadcast bit to capture broadcast frames. */ 657 rxfilt |= RE_RXCFG_RX_BROAD; 658 659 rxfilt &= ~(RE_RXCFG_RX_ALLPHYS | RE_RXCFG_RX_MULTI); 660 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) { 661 rxfilt |= RE_RXCFG_RX_MULTI; 662 663 /* If we want promiscuous mode, set the allframes bit. */ 664 if (ifp->if_flags & IFF_PROMISC) 665 rxfilt |= RE_RXCFG_RX_ALLPHYS; 666 667 CSR_WRITE_4(sc, RE_RXCFG, rxfilt); 668 CSR_WRITE_4(sc, RE_MAR0, 0xFFFFFFFF); 669 CSR_WRITE_4(sc, RE_MAR4, 0xFFFFFFFF); 670 return; 671 } 672 673 /* first, zot all the existing hash bits */ 674 CSR_WRITE_4(sc, RE_MAR0, 0); 675 CSR_WRITE_4(sc, RE_MAR4, 0); 676 677 /* now program new ones */ 678 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 679 if (ifma->ifma_addr->sa_family != AF_LINK) 680 continue; 681 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 682 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 683 if (h < 32) 684 hashes[0] |= (1 << h); 685 else 686 hashes[1] |= (1 << (h - 32)); 687 mcnt++; 688 } 689 690 if (mcnt) 691 rxfilt |= RE_RXCFG_RX_MULTI; 692 else 693 rxfilt &= ~RE_RXCFG_RX_MULTI; 694 695 CSR_WRITE_4(sc, RE_RXCFG, rxfilt); 696 697 /* 698 * For some unfathomable reason, RealTek decided to reverse 699 * the order of the multicast hash registers in the PCI Express 700 * parts. This means we have to write the hash pattern in reverse 701 * order for those devices. 702 */ 703 if (sc->re_caps & RE_C_PCIE) { 704 CSR_WRITE_4(sc, RE_MAR0, bswap32(hashes[0])); 705 CSR_WRITE_4(sc, RE_MAR4, bswap32(hashes[1])); 706 } else { 707 CSR_WRITE_4(sc, RE_MAR0, hashes[0]); 708 CSR_WRITE_4(sc, RE_MAR4, hashes[1]); 709 } 710 } 711 712 static void 713 re_reset(struct re_softc *sc, int running) 714 { 715 int i; 716 717 if ((sc->re_caps & RE_C_STOP_RXTX) && running) { 718 CSR_WRITE_1(sc, RE_COMMAND, 719 RE_CMD_STOPREQ | RE_CMD_TX_ENB | RE_CMD_RX_ENB); 720 DELAY(100); 721 } 722 723 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_RESET); 724 725 for (i = 0; i < RE_TIMEOUT; i++) { 726 DELAY(10); 727 if ((CSR_READ_1(sc, RE_COMMAND) & RE_CMD_RESET) == 0) 728 break; 729 } 730 if (i == RE_TIMEOUT) 731 if_printf(&sc->arpcom.ac_if, "reset never completed!\n"); 732 } 733 734 #ifdef RE_DIAG 735 /* 736 * The following routine is designed to test for a defect on some 737 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 738 * lines connected to the bus, however for a 32-bit only card, they 739 * should be pulled high. The result of this defect is that the 740 * NIC will not work right if you plug it into a 64-bit slot: DMA 741 * operations will be done with 64-bit transfers, which will fail 742 * because the 64-bit data lines aren't connected. 743 * 744 * There's no way to work around this (short of talking a soldering 745 * iron to the board), however we can detect it. The method we use 746 * here is to put the NIC into digital loopback mode, set the receiver 747 * to promiscuous mode, and then try to send a frame. We then compare 748 * the frame data we sent to what was received. If the data matches, 749 * then the NIC is working correctly, otherwise we know the user has 750 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 751 * slot. In the latter case, there's no way the NIC can work correctly, 752 * so we print out a message on the console and abort the device attach. 753 */ 754 755 static int 756 re_diag(struct re_softc *sc) 757 { 758 struct ifnet *ifp = &sc->arpcom.ac_if; 759 struct mbuf *m0; 760 struct ether_header *eh; 761 struct re_desc *cur_rx; 762 uint16_t status; 763 uint32_t rxstat; 764 int total_len, i, error = 0, phyaddr; 765 uint8_t dst[ETHER_ADDR_LEN] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 766 uint8_t src[ETHER_ADDR_LEN] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 767 768 /* Allocate a single mbuf */ 769 770 MGETHDR(m0, MB_DONTWAIT, MT_DATA); 771 if (m0 == NULL) 772 return(ENOBUFS); 773 774 /* 775 * Initialize the NIC in test mode. This sets the chip up 776 * so that it can send and receive frames, but performs the 777 * following special functions: 778 * - Puts receiver in promiscuous mode 779 * - Enables digital loopback mode 780 * - Leaves interrupts turned off 781 */ 782 783 ifp->if_flags |= IFF_PROMISC; 784 sc->re_flags |= RE_F_TESTMODE; 785 re_init(sc); 786 sc->re_flags |= RE_F_LINKED; 787 if (!RE_IS_8139CP(sc)) 788 phyaddr = 1; 789 else 790 phyaddr = 0; 791 792 re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_RESET); 793 for (i = 0; i < RE_TIMEOUT; i++) { 794 status = re_miibus_readreg(sc->re_dev, phyaddr, MII_BMCR); 795 if (!(status & BMCR_RESET)) 796 break; 797 } 798 799 re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_LOOP); 800 CSR_WRITE_2(sc, RE_ISR, RE_INTRS_DIAG); 801 802 DELAY(100000); 803 804 /* Put some data in the mbuf */ 805 806 eh = mtod(m0, struct ether_header *); 807 bcopy (dst, eh->ether_dhost, ETHER_ADDR_LEN); 808 bcopy (src, eh->ether_shost, ETHER_ADDR_LEN); 809 eh->ether_type = htons(ETHERTYPE_IP); 810 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 811 812 /* 813 * Queue the packet, start transmission. 814 * Note: ifq_handoff() ultimately calls re_start() for us. 815 */ 816 817 CSR_WRITE_2(sc, RE_ISR, 0xFFFF); 818 error = ifq_handoff(ifp, m0, NULL); 819 if (error) { 820 m0 = NULL; 821 goto done; 822 } 823 m0 = NULL; 824 825 /* Wait for it to propagate through the chip */ 826 827 DELAY(100000); 828 for (i = 0; i < RE_TIMEOUT; i++) { 829 status = CSR_READ_2(sc, RE_ISR); 830 CSR_WRITE_2(sc, RE_ISR, status); 831 if ((status & (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) == 832 (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) 833 break; 834 DELAY(10); 835 } 836 837 if (i == RE_TIMEOUT) { 838 if_printf(ifp, "diagnostic failed to receive packet " 839 "in loopback mode\n"); 840 error = EIO; 841 goto done; 842 } 843 844 /* 845 * The packet should have been dumped into the first 846 * entry in the RX DMA ring. Grab it from there. 847 */ 848 849 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 850 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD); 851 bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0], 852 BUS_DMASYNC_POSTWRITE); 853 bus_dmamap_unload(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0]); 854 855 m0 = sc->re_ldata.re_rx_mbuf[0]; 856 sc->re_ldata.re_rx_mbuf[0] = NULL; 857 eh = mtod(m0, struct ether_header *); 858 859 cur_rx = &sc->re_ldata.re_rx_list[0]; 860 total_len = RE_RXBYTES(cur_rx); 861 rxstat = le32toh(cur_rx->re_cmdstat); 862 863 if (total_len != ETHER_MIN_LEN) { 864 if_printf(ifp, "diagnostic failed, received short packet\n"); 865 error = EIO; 866 goto done; 867 } 868 869 /* Test that the received packet data matches what we sent. */ 870 871 if (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) || 872 bcmp(eh->ether_shost, &src, ETHER_ADDR_LEN) || 873 be16toh(eh->ether_type) != ETHERTYPE_IP) { 874 if_printf(ifp, "WARNING, DMA FAILURE!\n"); 875 if_printf(ifp, "expected TX data: %6D/%6D/0x%x\n", 876 dst, ":", src, ":", ETHERTYPE_IP); 877 if_printf(ifp, "received RX data: %6D/%6D/0x%x\n", 878 eh->ether_dhost, ":", eh->ether_shost, ":", 879 ntohs(eh->ether_type)); 880 if_printf(ifp, "You may have a defective 32-bit NIC plugged " 881 "into a 64-bit PCI slot.\n"); 882 if_printf(ifp, "Please re-install the NIC in a 32-bit slot " 883 "for proper operation.\n"); 884 if_printf(ifp, "Read the re(4) man page for more details.\n"); 885 error = EIO; 886 } 887 888 done: 889 /* Turn interface off, release resources */ 890 891 sc->re_flags &= ~(RE_F_LINKED | RE_F_TESTMODE); 892 ifp->if_flags &= ~IFF_PROMISC; 893 re_stop(sc); 894 if (m0 != NULL) 895 m_freem(m0); 896 897 return (error); 898 } 899 #endif /* RE_DIAG */ 900 901 /* 902 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 903 * IDs against our list and return a device name if we find a match. 904 */ 905 static int 906 re_probe(device_t dev) 907 { 908 const struct re_type *t; 909 const struct re_hwrev *hw_rev; 910 struct re_softc *sc; 911 int rid; 912 uint32_t hwrev, macmode, txcfg; 913 uint16_t vendor, product; 914 915 vendor = pci_get_vendor(dev); 916 product = pci_get_device(dev); 917 918 /* 919 * Only attach to rev.3 of the Linksys EG1032 adapter. 920 * Rev.2 is supported by sk(4). 921 */ 922 if (vendor == PCI_VENDOR_LINKSYS && 923 product == PCI_PRODUCT_LINKSYS_EG1032 && 924 pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3) 925 return ENXIO; 926 927 for (t = re_devs; t->re_name != NULL; t++) { 928 if (product == t->re_did && vendor == t->re_vid) 929 break; 930 } 931 932 /* 933 * Check if we found a RealTek device. 934 */ 935 if (t->re_name == NULL) 936 return ENXIO; 937 938 /* 939 * Temporarily map the I/O space so we can read the chip ID register. 940 */ 941 sc = kmalloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO); 942 rid = RE_PCI_LOIO; 943 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 944 RF_ACTIVE); 945 if (sc->re_res == NULL) { 946 device_printf(dev, "couldn't map ports/memory\n"); 947 kfree(sc, M_TEMP); 948 return ENXIO; 949 } 950 951 sc->re_btag = rman_get_bustag(sc->re_res); 952 sc->re_bhandle = rman_get_bushandle(sc->re_res); 953 954 txcfg = CSR_READ_4(sc, RE_TXCFG); 955 hwrev = txcfg & RE_TXCFG_HWREV; 956 macmode = txcfg & RE_TXCFG_MACMODE; 957 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, sc->re_res); 958 kfree(sc, M_TEMP); 959 960 /* 961 * and continue matching for the specific chip... 962 */ 963 for (hw_rev = re_hwrevs; hw_rev->re_hwrev != RE_HWREV_NULL; hw_rev++) { 964 if (hw_rev->re_hwrev == hwrev) { 965 sc = device_get_softc(dev); 966 967 sc->re_hwrev = hw_rev->re_hwrev; 968 sc->re_macver = hw_rev->re_macver; 969 sc->re_caps = hw_rev->re_caps; 970 sc->re_maxmtu = hw_rev->re_maxmtu; 971 sc->re_swcsum_lim = RE_SWCSUM_UNLIMITED; 972 973 /* 974 * Apply chip property fixup 975 */ 976 switch (sc->re_hwrev) { 977 case RE_HWREV_8169: 978 sc->re_swcsum_lim = RE_SWCSUM_LIM_8169; 979 break; 980 case RE_HWREV_8101E1: 981 case RE_HWREV_8101E2: 982 if (macmode == 0) 983 sc->re_macver = RE_MACVER_11; 984 else if (macmode == 0x200000) 985 sc->re_macver = RE_MACVER_12; 986 break; 987 case RE_HWREV_8102E: 988 case RE_HWREV_8102EL: 989 if (macmode == 0) 990 sc->re_macver = RE_MACVER_13; 991 else if (macmode == 0x100000) 992 sc->re_macver = RE_MACVER_14; 993 break; 994 case RE_HWREV_8168B2: 995 case RE_HWREV_8168B3: 996 if (macmode == 0) 997 sc->re_macver = RE_MACVER_22; 998 break; 999 case RE_HWREV_8168C: 1000 if (macmode == 0) 1001 sc->re_macver = RE_MACVER_24; 1002 else if (macmode == 0x200000) 1003 sc->re_macver = RE_MACVER_25; 1004 else if (macmode == 0x300000) 1005 sc->re_macver = RE_MACVER_27; 1006 break; 1007 case RE_HWREV_8168CP: 1008 if (macmode == 0) 1009 sc->re_macver = RE_MACVER_26; 1010 else if (macmode == 0x100000) 1011 sc->re_macver = RE_MACVER_28; 1012 break; 1013 } 1014 if (pci_get_pciecap_ptr(dev) != 0) 1015 sc->re_caps |= RE_C_PCIE; 1016 1017 device_set_desc(dev, t->re_name); 1018 return 0; 1019 } 1020 } 1021 1022 if (bootverbose) { 1023 device_printf(dev, "unknown hwrev 0x%08x, macmode 0x%08x\n", 1024 hwrev, macmode); 1025 } 1026 return ENXIO; 1027 } 1028 1029 static void 1030 re_dma_map_desc(void *xarg, bus_dma_segment_t *segs, int nsegs, 1031 bus_size_t mapsize, int error) 1032 { 1033 struct re_dmaload_arg *arg = xarg; 1034 int i; 1035 1036 if (error) 1037 return; 1038 1039 if (nsegs > arg->re_nsegs) { 1040 arg->re_nsegs = 0; 1041 return; 1042 } 1043 1044 arg->re_nsegs = nsegs; 1045 for (i = 0; i < nsegs; ++i) 1046 arg->re_segs[i] = segs[i]; 1047 } 1048 1049 /* 1050 * Map a single buffer address. 1051 */ 1052 1053 static void 1054 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1055 { 1056 uint32_t *addr; 1057 1058 if (error) 1059 return; 1060 1061 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 1062 addr = arg; 1063 *addr = segs->ds_addr; 1064 } 1065 1066 static int 1067 re_allocmem(device_t dev) 1068 { 1069 struct re_softc *sc = device_get_softc(dev); 1070 int error, i; 1071 1072 /* 1073 * Allocate list data 1074 */ 1075 sc->re_ldata.re_tx_mbuf = 1076 kmalloc(sc->re_tx_desc_cnt * sizeof(struct mbuf *), 1077 M_DEVBUF, M_ZERO | M_WAITOK); 1078 1079 sc->re_ldata.re_rx_mbuf = 1080 kmalloc(sc->re_rx_desc_cnt * sizeof(struct mbuf *), 1081 M_DEVBUF, M_ZERO | M_WAITOK); 1082 1083 sc->re_ldata.re_rx_paddr = 1084 kmalloc(sc->re_rx_desc_cnt * sizeof(bus_addr_t), 1085 M_DEVBUF, M_ZERO | M_WAITOK); 1086 1087 sc->re_ldata.re_tx_dmamap = 1088 kmalloc(sc->re_tx_desc_cnt * sizeof(bus_dmamap_t), 1089 M_DEVBUF, M_ZERO | M_WAITOK); 1090 1091 sc->re_ldata.re_rx_dmamap = 1092 kmalloc(sc->re_rx_desc_cnt * sizeof(bus_dmamap_t), 1093 M_DEVBUF, M_ZERO | M_WAITOK); 1094 1095 /* 1096 * Allocate the parent bus DMA tag appropriate for PCI. 1097 */ 1098 error = bus_dma_tag_create(NULL, /* parent */ 1099 1, 0, /* alignment, boundary */ 1100 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1101 BUS_SPACE_MAXADDR, /* highaddr */ 1102 NULL, NULL, /* filter, filterarg */ 1103 MAXBSIZE, RE_MAXSEGS, /* maxsize, nsegments */ 1104 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1105 BUS_DMA_ALLOCNOW, /* flags */ 1106 &sc->re_parent_tag); 1107 if (error) { 1108 device_printf(dev, "could not allocate parent dma tag\n"); 1109 return error; 1110 } 1111 1112 /* Allocate tag for TX descriptor list. */ 1113 error = bus_dma_tag_create(sc->re_parent_tag, 1114 RE_RING_ALIGN, 0, 1115 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1116 NULL, NULL, 1117 RE_TX_LIST_SZ(sc), 1, RE_TX_LIST_SZ(sc), 1118 BUS_DMA_ALLOCNOW, 1119 &sc->re_ldata.re_tx_list_tag); 1120 if (error) { 1121 device_printf(dev, "could not allocate TX ring dma tag\n"); 1122 return(error); 1123 } 1124 1125 /* Allocate DMA'able memory for the TX ring */ 1126 error = bus_dmamem_alloc(sc->re_ldata.re_tx_list_tag, 1127 (void **)&sc->re_ldata.re_tx_list, 1128 BUS_DMA_WAITOK | BUS_DMA_ZERO, 1129 &sc->re_ldata.re_tx_list_map); 1130 if (error) { 1131 device_printf(dev, "could not allocate TX ring\n"); 1132 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 1133 sc->re_ldata.re_tx_list_tag = NULL; 1134 return(error); 1135 } 1136 1137 /* Load the map for the TX ring. */ 1138 error = bus_dmamap_load(sc->re_ldata.re_tx_list_tag, 1139 sc->re_ldata.re_tx_list_map, 1140 sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc), 1141 re_dma_map_addr, &sc->re_ldata.re_tx_list_addr, 1142 BUS_DMA_NOWAIT); 1143 if (error) { 1144 device_printf(dev, "could not get address of TX ring\n"); 1145 bus_dmamem_free(sc->re_ldata.re_tx_list_tag, 1146 sc->re_ldata.re_tx_list, 1147 sc->re_ldata.re_tx_list_map); 1148 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 1149 sc->re_ldata.re_tx_list_tag = NULL; 1150 return(error); 1151 } 1152 1153 /* Allocate tag for RX descriptor list. */ 1154 error = bus_dma_tag_create(sc->re_parent_tag, 1155 RE_RING_ALIGN, 0, 1156 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1157 NULL, NULL, 1158 RE_RX_LIST_SZ(sc), 1, RE_RX_LIST_SZ(sc), 1159 BUS_DMA_ALLOCNOW, 1160 &sc->re_ldata.re_rx_list_tag); 1161 if (error) { 1162 device_printf(dev, "could not allocate RX ring dma tag\n"); 1163 return(error); 1164 } 1165 1166 /* Allocate DMA'able memory for the RX ring */ 1167 error = bus_dmamem_alloc(sc->re_ldata.re_rx_list_tag, 1168 (void **)&sc->re_ldata.re_rx_list, 1169 BUS_DMA_WAITOK | BUS_DMA_ZERO, 1170 &sc->re_ldata.re_rx_list_map); 1171 if (error) { 1172 device_printf(dev, "could not allocate RX ring\n"); 1173 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 1174 sc->re_ldata.re_rx_list_tag = NULL; 1175 return(error); 1176 } 1177 1178 /* Load the map for the RX ring. */ 1179 error = bus_dmamap_load(sc->re_ldata.re_rx_list_tag, 1180 sc->re_ldata.re_rx_list_map, 1181 sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc), 1182 re_dma_map_addr, &sc->re_ldata.re_rx_list_addr, 1183 BUS_DMA_NOWAIT); 1184 if (error) { 1185 device_printf(dev, "could not get address of RX ring\n"); 1186 bus_dmamem_free(sc->re_ldata.re_rx_list_tag, 1187 sc->re_ldata.re_rx_list, 1188 sc->re_ldata.re_rx_list_map); 1189 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 1190 sc->re_ldata.re_rx_list_tag = NULL; 1191 return(error); 1192 } 1193 1194 /* Allocate map for RX/TX mbufs. */ 1195 error = bus_dma_tag_create(sc->re_parent_tag, 1196 ETHER_ALIGN, 0, 1197 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1198 NULL, NULL, 1199 RE_FRAMELEN_MAX, RE_MAXSEGS, MCLBYTES, 1200 BUS_DMA_ALLOCNOW, 1201 &sc->re_ldata.re_mtag); 1202 if (error) { 1203 device_printf(dev, "could not allocate buf dma tag\n"); 1204 return(error); 1205 } 1206 1207 /* Create spare DMA map for RX */ 1208 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 1209 &sc->re_ldata.re_rx_spare); 1210 if (error) { 1211 device_printf(dev, "can't create spare DMA map for RX\n"); 1212 bus_dma_tag_destroy(sc->re_ldata.re_mtag); 1213 sc->re_ldata.re_mtag = NULL; 1214 return error; 1215 } 1216 1217 /* Create DMA maps for TX buffers */ 1218 for (i = 0; i < sc->re_tx_desc_cnt; i++) { 1219 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 1220 &sc->re_ldata.re_tx_dmamap[i]); 1221 if (error) { 1222 device_printf(dev, "can't create DMA map for TX buf\n"); 1223 re_freebufmem(sc, i, 0); 1224 return(error); 1225 } 1226 } 1227 1228 /* Create DMA maps for RX buffers */ 1229 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 1230 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 1231 &sc->re_ldata.re_rx_dmamap[i]); 1232 if (error) { 1233 device_printf(dev, "can't create DMA map for RX buf\n"); 1234 re_freebufmem(sc, sc->re_tx_desc_cnt, i); 1235 return(error); 1236 } 1237 } 1238 1239 /* Create jumbo buffer pool for RX if required */ 1240 if (sc->re_caps & RE_C_CONTIGRX) { 1241 error = re_jpool_alloc(sc); 1242 if (error) { 1243 re_jpool_free(sc); 1244 /* Disable jumbo frame support */ 1245 sc->re_maxmtu = ETHERMTU; 1246 } 1247 } 1248 return(0); 1249 } 1250 1251 static void 1252 re_freebufmem(struct re_softc *sc, int tx_cnt, int rx_cnt) 1253 { 1254 int i; 1255 1256 /* Destroy all the RX and TX buffer maps */ 1257 if (sc->re_ldata.re_mtag) { 1258 for (i = 0; i < tx_cnt; i++) { 1259 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1260 sc->re_ldata.re_tx_dmamap[i]); 1261 } 1262 for (i = 0; i < rx_cnt; i++) { 1263 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1264 sc->re_ldata.re_rx_dmamap[i]); 1265 } 1266 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1267 sc->re_ldata.re_rx_spare); 1268 bus_dma_tag_destroy(sc->re_ldata.re_mtag); 1269 sc->re_ldata.re_mtag = NULL; 1270 } 1271 } 1272 1273 static void 1274 re_freemem(device_t dev) 1275 { 1276 struct re_softc *sc = device_get_softc(dev); 1277 1278 /* Unload and free the RX DMA ring memory and map */ 1279 if (sc->re_ldata.re_rx_list_tag) { 1280 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag, 1281 sc->re_ldata.re_rx_list_map); 1282 bus_dmamem_free(sc->re_ldata.re_rx_list_tag, 1283 sc->re_ldata.re_rx_list, 1284 sc->re_ldata.re_rx_list_map); 1285 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 1286 } 1287 1288 /* Unload and free the TX DMA ring memory and map */ 1289 if (sc->re_ldata.re_tx_list_tag) { 1290 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag, 1291 sc->re_ldata.re_tx_list_map); 1292 bus_dmamem_free(sc->re_ldata.re_tx_list_tag, 1293 sc->re_ldata.re_tx_list, 1294 sc->re_ldata.re_tx_list_map); 1295 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 1296 } 1297 1298 /* Free RX/TX buf DMA stuffs */ 1299 re_freebufmem(sc, sc->re_tx_desc_cnt, sc->re_rx_desc_cnt); 1300 1301 /* Unload and free the stats buffer and map */ 1302 if (sc->re_ldata.re_stag) { 1303 bus_dmamap_unload(sc->re_ldata.re_stag, 1304 sc->re_ldata.re_rx_list_map); 1305 bus_dmamem_free(sc->re_ldata.re_stag, 1306 sc->re_ldata.re_stats, 1307 sc->re_ldata.re_smap); 1308 bus_dma_tag_destroy(sc->re_ldata.re_stag); 1309 } 1310 1311 if (sc->re_caps & RE_C_CONTIGRX) 1312 re_jpool_free(sc); 1313 1314 if (sc->re_parent_tag) 1315 bus_dma_tag_destroy(sc->re_parent_tag); 1316 1317 if (sc->re_ldata.re_tx_mbuf != NULL) 1318 kfree(sc->re_ldata.re_tx_mbuf, M_DEVBUF); 1319 if (sc->re_ldata.re_rx_mbuf != NULL) 1320 kfree(sc->re_ldata.re_rx_mbuf, M_DEVBUF); 1321 if (sc->re_ldata.re_rx_paddr != NULL) 1322 kfree(sc->re_ldata.re_rx_paddr, M_DEVBUF); 1323 if (sc->re_ldata.re_tx_dmamap != NULL) 1324 kfree(sc->re_ldata.re_tx_dmamap, M_DEVBUF); 1325 if (sc->re_ldata.re_rx_dmamap != NULL) 1326 kfree(sc->re_ldata.re_rx_dmamap, M_DEVBUF); 1327 } 1328 1329 /* 1330 * Attach the interface. Allocate softc structures, do ifmedia 1331 * setup and ethernet/BPF attach. 1332 */ 1333 static int 1334 re_attach(device_t dev) 1335 { 1336 struct re_softc *sc = device_get_softc(dev); 1337 struct ifnet *ifp; 1338 uint8_t eaddr[ETHER_ADDR_LEN]; 1339 int error = 0, rid, qlen; 1340 1341 callout_init(&sc->re_timer); 1342 sc->re_dev = dev; 1343 1344 if (RE_IS_8139CP(sc)) { 1345 sc->re_rx_desc_cnt = RE_RX_DESC_CNT_8139CP; 1346 sc->re_tx_desc_cnt = RE_TX_DESC_CNT_8139CP; 1347 } else { 1348 sc->re_rx_desc_cnt = re_rx_desc_count; 1349 if (sc->re_rx_desc_cnt > RE_RX_DESC_CNT_MAX) 1350 sc->re_rx_desc_cnt = RE_RX_DESC_CNT_MAX; 1351 1352 sc->re_tx_desc_cnt = re_tx_desc_count; 1353 if (sc->re_tx_desc_cnt > RE_TX_DESC_CNT_MAX) 1354 sc->re_tx_desc_cnt = RE_TX_DESC_CNT_MAX; 1355 } 1356 1357 qlen = RE_IFQ_MAXLEN; 1358 if (sc->re_tx_desc_cnt > qlen) 1359 qlen = sc->re_tx_desc_cnt; 1360 1361 sc->re_rxbuf_size = MCLBYTES; 1362 sc->re_newbuf = re_newbuf_std; 1363 1364 sc->re_tx_time = 5; /* 125us */ 1365 sc->re_rx_time = 2; /* 50us */ 1366 if (sc->re_caps & RE_C_PCIE) 1367 sc->re_sim_time = 75; /* 75us */ 1368 else 1369 sc->re_sim_time = 125; /* 125us */ 1370 sc->re_imtype = RE_IMTYPE_SIM; /* simulated interrupt moderation */ 1371 re_config_imtype(sc, sc->re_imtype); 1372 1373 sysctl_ctx_init(&sc->re_sysctl_ctx); 1374 sc->re_sysctl_tree = SYSCTL_ADD_NODE(&sc->re_sysctl_ctx, 1375 SYSCTL_STATIC_CHILDREN(_hw), 1376 OID_AUTO, 1377 device_get_nameunit(dev), 1378 CTLFLAG_RD, 0, ""); 1379 if (sc->re_sysctl_tree == NULL) { 1380 device_printf(dev, "can't add sysctl node\n"); 1381 error = ENXIO; 1382 goto fail; 1383 } 1384 SYSCTL_ADD_INT(&sc->re_sysctl_ctx, 1385 SYSCTL_CHILDREN(sc->re_sysctl_tree), OID_AUTO, 1386 "rx_desc_count", CTLFLAG_RD, &sc->re_rx_desc_cnt, 1387 0, "RX desc count"); 1388 SYSCTL_ADD_INT(&sc->re_sysctl_ctx, 1389 SYSCTL_CHILDREN(sc->re_sysctl_tree), OID_AUTO, 1390 "tx_desc_count", CTLFLAG_RD, &sc->re_tx_desc_cnt, 1391 0, "TX desc count"); 1392 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx, 1393 SYSCTL_CHILDREN(sc->re_sysctl_tree), 1394 OID_AUTO, "sim_time", 1395 CTLTYPE_INT | CTLFLAG_RW, 1396 sc, 0, re_sysctl_simtime, "I", 1397 "Simulated interrupt moderation time (usec)."); 1398 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx, 1399 SYSCTL_CHILDREN(sc->re_sysctl_tree), 1400 OID_AUTO, "imtype", 1401 CTLTYPE_INT | CTLFLAG_RW, 1402 sc, 0, re_sysctl_imtype, "I", 1403 "Interrupt moderation type -- " 1404 "0:disable, 1:simulated, " 1405 "2:hardware(if supported)"); 1406 if (sc->re_caps & RE_C_HWIM) { 1407 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx, 1408 SYSCTL_CHILDREN(sc->re_sysctl_tree), 1409 OID_AUTO, "hw_rxtime", 1410 CTLTYPE_INT | CTLFLAG_RW, 1411 sc, 0, re_sysctl_rxtime, "I", 1412 "Hardware interrupt moderation time " 1413 "(unit: 25usec)."); 1414 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx, 1415 SYSCTL_CHILDREN(sc->re_sysctl_tree), 1416 OID_AUTO, "hw_txtime", 1417 CTLTYPE_INT | CTLFLAG_RW, 1418 sc, 0, re_sysctl_txtime, "I", 1419 "Hardware interrupt moderation time " 1420 "(unit: 25usec)."); 1421 } 1422 1423 #ifndef BURN_BRIDGES 1424 /* 1425 * Handle power management nonsense. 1426 */ 1427 1428 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1429 uint32_t membase, irq; 1430 1431 /* Save important PCI config data. */ 1432 membase = pci_read_config(dev, RE_PCI_LOMEM, 4); 1433 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1434 1435 /* Reset the power state. */ 1436 device_printf(dev, "chip is in D%d power mode " 1437 "-- setting to D0\n", pci_get_powerstate(dev)); 1438 1439 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1440 1441 /* Restore PCI config data. */ 1442 pci_write_config(dev, RE_PCI_LOMEM, membase, 4); 1443 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1444 } 1445 #endif 1446 /* 1447 * Map control/status registers. 1448 */ 1449 pci_enable_busmaster(dev); 1450 1451 rid = RE_PCI_LOIO; 1452 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 1453 RF_ACTIVE); 1454 1455 if (sc->re_res == NULL) { 1456 device_printf(dev, "couldn't map ports\n"); 1457 error = ENXIO; 1458 goto fail; 1459 } 1460 1461 sc->re_btag = rman_get_bustag(sc->re_res); 1462 sc->re_bhandle = rman_get_bushandle(sc->re_res); 1463 1464 /* Allocate interrupt */ 1465 rid = 0; 1466 sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1467 RF_SHAREABLE | RF_ACTIVE); 1468 1469 if (sc->re_irq == NULL) { 1470 device_printf(dev, "couldn't map interrupt\n"); 1471 error = ENXIO; 1472 goto fail; 1473 } 1474 1475 /* Reset the adapter. */ 1476 re_reset(sc, 0); 1477 1478 if (RE_IS_8139CP(sc)) { 1479 sc->re_bus_speed = 33; /* XXX */ 1480 } else if (sc->re_caps & RE_C_PCIE) { 1481 sc->re_bus_speed = 125; 1482 } else { 1483 uint8_t cfg2; 1484 1485 cfg2 = CSR_READ_1(sc, RE_CFG2); 1486 switch (cfg2 & RE_CFG2_PCICLK_MASK) { 1487 case RE_CFG2_PCICLK_33MHZ: 1488 sc->re_bus_speed = 33; 1489 break; 1490 case RE_CFG2_PCICLK_66MHZ: 1491 sc->re_bus_speed = 66; 1492 break; 1493 default: 1494 device_printf(dev, "unknown bus speed, assume 33MHz\n"); 1495 sc->re_bus_speed = 33; 1496 break; 1497 } 1498 if (cfg2 & RE_CFG2_PCI64) 1499 sc->re_caps |= RE_C_PCI64; 1500 } 1501 device_printf(dev, "Hardware rev. 0x%08x; MAC ver. 0x%02x; " 1502 "PCI%s %dMHz\n", 1503 sc->re_hwrev, sc->re_macver, 1504 (sc->re_caps & RE_C_PCIE) ? 1505 "-E" : ((sc->re_caps & RE_C_PCI64) ? "64" : "32"), 1506 sc->re_bus_speed); 1507 1508 /* 1509 * NOTE: 1510 * DO NOT try to adjust config1 and config5 which was spotted in 1511 * Realtek's Linux drivers. It will _permanently_ damage certain 1512 * cards EEPROM, e.g. one of my 8168B (0x38000000) card ... 1513 */ 1514 1515 re_get_eaddr(sc, eaddr); 1516 1517 if (!RE_IS_8139CP(sc)) { 1518 /* Set RX length mask */ 1519 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN; 1520 sc->re_txstart = RE_GTXSTART; 1521 } else { 1522 /* Set RX length mask */ 1523 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN; 1524 sc->re_txstart = RE_TXSTART; 1525 } 1526 1527 /* Allocate DMA stuffs */ 1528 error = re_allocmem(dev); 1529 if (error) 1530 goto fail; 1531 1532 /* 1533 * Apply some magic PCI settings from Realtek ... 1534 */ 1535 if (RE_IS_8169(sc)) { 1536 CSR_WRITE_1(sc, 0x82, 1); 1537 pci_write_config(dev, PCIR_CACHELNSZ, 0x8, 1); 1538 } 1539 pci_write_config(dev, PCIR_LATTIMER, 0x40, 1); 1540 1541 if (sc->re_caps & RE_C_MAC2) { 1542 /* 1543 * Following part is extracted from Realtek BSD driver v176. 1544 * However, this does _not_ make much/any sense: 1545 * 8168C's PCI Express device control is located at 0x78, 1546 * so the reading from 0x79 (higher part of 0x78) and setting 1547 * the 4~6bits intend to enlarge the "max read request size" 1548 * (we will do it). The content of the rest part of this 1549 * register is not meaningful to other PCI registers, so 1550 * writing the value to 0x54 could be completely wrong. 1551 * 0x80 is the lower part of PCI Express device status, non- 1552 * reserved bits are RW1C, writing 0 to them will not have 1553 * any effect at all. 1554 */ 1555 #ifdef foo 1556 uint8_t val; 1557 1558 val = pci_read_config(dev, 0x79, 1); 1559 val = (val & ~0x70) | 0x50; 1560 pci_write_config(dev, 0x54, val, 1); 1561 pci_write_config(dev, 0x80, 0, 1); 1562 #endif 1563 } 1564 1565 /* 1566 * Apply some PHY fixup from Realtek ... 1567 */ 1568 if (sc->re_hwrev == RE_HWREV_8110S) { 1569 CSR_WRITE_1(sc, 0x82, 1); 1570 re_miibus_writereg(dev, 1, 0xb, 0); 1571 } 1572 if (sc->re_caps & RE_C_PHYPMGT) { 1573 /* Power up PHY */ 1574 re_miibus_writereg(dev, 1, 0x1f, 0); 1575 re_miibus_writereg(dev, 1, 0xe, 0); 1576 } 1577 1578 /* Do MII setup */ 1579 if (mii_phy_probe(dev, &sc->re_miibus, 1580 re_ifmedia_upd, re_ifmedia_sts)) { 1581 device_printf(dev, "MII without any phy!\n"); 1582 error = ENXIO; 1583 goto fail; 1584 } 1585 1586 ifp = &sc->arpcom.ac_if; 1587 ifp->if_softc = sc; 1588 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1589 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1590 ifp->if_ioctl = re_ioctl; 1591 ifp->if_start = re_start; 1592 #ifdef DEVICE_POLLING 1593 ifp->if_poll = re_poll; 1594 #endif 1595 ifp->if_watchdog = re_watchdog; 1596 ifp->if_init = re_init; 1597 if (!RE_IS_8139CP(sc)) /* XXX */ 1598 ifp->if_baudrate = 1000000000; 1599 else 1600 ifp->if_baudrate = 100000000; 1601 ifq_set_maxlen(&ifp->if_snd, qlen); 1602 ifq_set_ready(&ifp->if_snd); 1603 1604 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1605 if (sc->re_caps & RE_C_HWCSUM) 1606 ifp->if_capabilities |= IFCAP_HWCSUM; 1607 1608 ifp->if_capenable = ifp->if_capabilities; 1609 if (ifp->if_capabilities & IFCAP_HWCSUM) 1610 ifp->if_hwassist = RE_CSUM_FEATURES; 1611 else 1612 ifp->if_hwassist = 0; 1613 1614 /* 1615 * Call MI attach routine. 1616 */ 1617 ether_ifattach(ifp, eaddr, NULL); 1618 1619 #ifdef RE_DIAG 1620 /* 1621 * Perform hardware diagnostic on the original RTL8169. 1622 * Some 32-bit cards were incorrectly wired and would 1623 * malfunction if plugged into a 64-bit slot. 1624 */ 1625 if (sc->re_hwrev == RE_HWREV_8169) { 1626 lwkt_serialize_enter(ifp->if_serializer); 1627 error = re_diag(sc); 1628 lwkt_serialize_exit(ifp->if_serializer); 1629 1630 if (error) { 1631 device_printf(dev, "hardware diagnostic failure\n"); 1632 ether_ifdetach(ifp); 1633 goto fail; 1634 } 1635 } 1636 #endif /* RE_DIAG */ 1637 1638 /* Hook interrupt last to avoid having to lock softc */ 1639 error = bus_setup_intr(dev, sc->re_irq, INTR_MPSAFE, re_intr, sc, 1640 &sc->re_intrhand, ifp->if_serializer); 1641 1642 if (error) { 1643 device_printf(dev, "couldn't set up irq\n"); 1644 ether_ifdetach(ifp); 1645 goto fail; 1646 } 1647 1648 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->re_irq)); 1649 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 1650 1651 fail: 1652 if (error) 1653 re_detach(dev); 1654 1655 return (error); 1656 } 1657 1658 /* 1659 * Shutdown hardware and free up resources. This can be called any 1660 * time after the mutex has been initialized. It is called in both 1661 * the error case in attach and the normal detach case so it needs 1662 * to be careful about only freeing resources that have actually been 1663 * allocated. 1664 */ 1665 static int 1666 re_detach(device_t dev) 1667 { 1668 struct re_softc *sc = device_get_softc(dev); 1669 struct ifnet *ifp = &sc->arpcom.ac_if; 1670 1671 /* These should only be active if attach succeeded */ 1672 if (device_is_attached(dev)) { 1673 lwkt_serialize_enter(ifp->if_serializer); 1674 re_stop(sc); 1675 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand); 1676 lwkt_serialize_exit(ifp->if_serializer); 1677 1678 ether_ifdetach(ifp); 1679 } 1680 if (sc->re_miibus) 1681 device_delete_child(dev, sc->re_miibus); 1682 bus_generic_detach(dev); 1683 1684 if (sc->re_sysctl_tree != NULL) 1685 sysctl_ctx_free(&sc->re_sysctl_ctx); 1686 1687 if (sc->re_irq) 1688 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->re_irq); 1689 if (sc->re_res) { 1690 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, 1691 sc->re_res); 1692 } 1693 1694 /* Free DMA stuffs */ 1695 re_freemem(dev); 1696 1697 return(0); 1698 } 1699 1700 static void 1701 re_setup_rxdesc(struct re_softc *sc, int idx) 1702 { 1703 bus_addr_t paddr; 1704 uint32_t cmdstat; 1705 struct re_desc *d; 1706 1707 paddr = sc->re_ldata.re_rx_paddr[idx]; 1708 d = &sc->re_ldata.re_rx_list[idx]; 1709 1710 d->re_bufaddr_lo = htole32(RE_ADDR_LO(paddr)); 1711 d->re_bufaddr_hi = htole32(RE_ADDR_HI(paddr)); 1712 1713 cmdstat = sc->re_rxbuf_size | RE_RDESC_CMD_OWN; 1714 if (idx == (sc->re_rx_desc_cnt - 1)) 1715 cmdstat |= RE_RDESC_CMD_EOR; 1716 d->re_cmdstat = htole32(cmdstat); 1717 } 1718 1719 static int 1720 re_newbuf_std(struct re_softc *sc, int idx, int init) 1721 { 1722 struct re_dmaload_arg arg; 1723 bus_dma_segment_t seg; 1724 bus_dmamap_t map; 1725 struct mbuf *m; 1726 int error; 1727 1728 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 1729 if (m == NULL) { 1730 error = ENOBUFS; 1731 1732 if (init) { 1733 if_printf(&sc->arpcom.ac_if, "m_getcl failed\n"); 1734 return error; 1735 } else { 1736 goto back; 1737 } 1738 } 1739 m->m_len = m->m_pkthdr.len = MCLBYTES; 1740 1741 /* 1742 * NOTE: 1743 * Some re(4) chips(e.g. RTL8101E) need address of the receive buffer 1744 * to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here. 1745 */ 1746 1747 arg.re_nsegs = 1; 1748 arg.re_segs = &seg; 1749 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, 1750 sc->re_ldata.re_rx_spare, m, 1751 re_dma_map_desc, &arg, BUS_DMA_NOWAIT); 1752 if (error || arg.re_nsegs == 0) { 1753 if (!error) { 1754 if_printf(&sc->arpcom.ac_if, "too many segments?!\n"); 1755 bus_dmamap_unload(sc->re_ldata.re_mtag, 1756 sc->re_ldata.re_rx_spare); 1757 error = EFBIG; 1758 } 1759 m_freem(m); 1760 1761 if (init) { 1762 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 1763 return error; 1764 } else { 1765 goto back; 1766 } 1767 } 1768 1769 if (!init) { 1770 bus_dmamap_sync(sc->re_ldata.re_mtag, 1771 sc->re_ldata.re_rx_dmamap[idx], 1772 BUS_DMASYNC_POSTREAD); 1773 bus_dmamap_unload(sc->re_ldata.re_mtag, 1774 sc->re_ldata.re_rx_dmamap[idx]); 1775 } 1776 sc->re_ldata.re_rx_mbuf[idx] = m; 1777 sc->re_ldata.re_rx_paddr[idx] = seg.ds_addr; 1778 1779 map = sc->re_ldata.re_rx_dmamap[idx]; 1780 sc->re_ldata.re_rx_dmamap[idx] = sc->re_ldata.re_rx_spare; 1781 sc->re_ldata.re_rx_spare = map; 1782 back: 1783 re_setup_rxdesc(sc, idx); 1784 return error; 1785 } 1786 1787 static int 1788 re_newbuf_jumbo(struct re_softc *sc, int idx, int init) 1789 { 1790 struct mbuf *m; 1791 struct re_jbuf *jbuf; 1792 int error = 0; 1793 1794 MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA); 1795 if (m == NULL) { 1796 error = ENOBUFS; 1797 if (init) { 1798 if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n"); 1799 return error; 1800 } else { 1801 goto back; 1802 } 1803 } 1804 1805 jbuf = re_jbuf_alloc(sc); 1806 if (jbuf == NULL) { 1807 m_freem(m); 1808 1809 error = ENOBUFS; 1810 if (init) { 1811 if_printf(&sc->arpcom.ac_if, "jpool is empty\n"); 1812 return error; 1813 } else { 1814 goto back; 1815 } 1816 } 1817 1818 m->m_ext.ext_arg = jbuf; 1819 m->m_ext.ext_buf = jbuf->re_buf; 1820 m->m_ext.ext_free = re_jbuf_free; 1821 m->m_ext.ext_ref = re_jbuf_ref; 1822 m->m_ext.ext_size = sc->re_rxbuf_size; 1823 1824 m->m_data = m->m_ext.ext_buf; 1825 m->m_flags |= M_EXT; 1826 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1827 1828 /* 1829 * NOTE: 1830 * Some re(4) chips(e.g. RTL8101E) need address of the receive buffer 1831 * to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here. 1832 */ 1833 1834 sc->re_ldata.re_rx_mbuf[idx] = m; 1835 sc->re_ldata.re_rx_paddr[idx] = jbuf->re_paddr; 1836 back: 1837 re_setup_rxdesc(sc, idx); 1838 return error; 1839 } 1840 1841 static int 1842 re_tx_list_init(struct re_softc *sc) 1843 { 1844 bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 1845 1846 /* Flush the TX descriptors */ 1847 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1848 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE); 1849 1850 sc->re_ldata.re_tx_prodidx = 0; 1851 sc->re_ldata.re_tx_considx = 0; 1852 sc->re_ldata.re_tx_free = sc->re_tx_desc_cnt; 1853 1854 return(0); 1855 } 1856 1857 static int 1858 re_rx_list_init(struct re_softc *sc) 1859 { 1860 int i, error; 1861 1862 bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc)); 1863 1864 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 1865 error = sc->re_newbuf(sc, i, 1); 1866 if (error) 1867 return(error); 1868 } 1869 1870 /* Flush the RX descriptors */ 1871 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1872 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE); 1873 1874 sc->re_ldata.re_rx_prodidx = 0; 1875 sc->re_head = sc->re_tail = NULL; 1876 1877 return(0); 1878 } 1879 1880 #define RE_IP4_PACKET 0x1 1881 #define RE_TCP_PACKET 0x2 1882 #define RE_UDP_PACKET 0x4 1883 1884 static __inline uint8_t 1885 re_packet_type(struct re_softc *sc, uint32_t rxstat, uint32_t rxctrl) 1886 { 1887 uint8_t packet_type = 0; 1888 1889 if (sc->re_caps & RE_C_MAC2) { 1890 if (rxctrl & RE_RDESC_CTL_PROTOIP4) 1891 packet_type |= RE_IP4_PACKET; 1892 } else { 1893 if (rxstat & RE_RDESC_STAT_PROTOID) 1894 packet_type |= RE_IP4_PACKET; 1895 } 1896 if (RE_TCPPKT(rxstat)) 1897 packet_type |= RE_TCP_PACKET; 1898 else if (RE_UDPPKT(rxstat)) 1899 packet_type |= RE_UDP_PACKET; 1900 return packet_type; 1901 } 1902 1903 /* 1904 * RX handler for C+ and 8169. For the gigE chips, we support 1905 * the reception of jumbo frames that have been fragmented 1906 * across multiple 2K mbuf cluster buffers. 1907 */ 1908 static int 1909 re_rxeof(struct re_softc *sc) 1910 { 1911 struct ifnet *ifp = &sc->arpcom.ac_if; 1912 struct mbuf *m; 1913 struct re_desc *cur_rx; 1914 uint32_t rxstat, rxctrl; 1915 int i, total_len, rx = 0; 1916 struct mbuf_chain chain[MAXCPU]; 1917 1918 /* Invalidate the descriptor memory */ 1919 1920 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1921 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD); 1922 1923 ether_input_chain_init(chain); 1924 1925 for (i = sc->re_ldata.re_rx_prodidx; 1926 RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0; RE_RXDESC_INC(sc, i)) { 1927 cur_rx = &sc->re_ldata.re_rx_list[i]; 1928 m = sc->re_ldata.re_rx_mbuf[i]; 1929 total_len = RE_RXBYTES(cur_rx); 1930 rxstat = le32toh(cur_rx->re_cmdstat); 1931 rxctrl = le32toh(cur_rx->re_control); 1932 1933 rx = 1; 1934 1935 #ifdef INVARIANTS 1936 if (sc->re_flags & RE_F_USE_JPOOL) 1937 KKASSERT(rxstat & RE_RDESC_STAT_EOF); 1938 #endif 1939 1940 if ((rxstat & RE_RDESC_STAT_EOF) == 0) { 1941 if (sc->re_flags & RE_F_DROP_RXFRAG) { 1942 re_setup_rxdesc(sc, i); 1943 continue; 1944 } 1945 1946 if (sc->re_newbuf(sc, i, 0)) { 1947 /* Drop upcoming fragments */ 1948 sc->re_flags |= RE_F_DROP_RXFRAG; 1949 continue; 1950 } 1951 1952 m->m_len = MCLBYTES; 1953 if (sc->re_head == NULL) { 1954 sc->re_head = sc->re_tail = m; 1955 } else { 1956 sc->re_tail->m_next = m; 1957 sc->re_tail = m; 1958 } 1959 continue; 1960 } else if (sc->re_flags & RE_F_DROP_RXFRAG) { 1961 /* 1962 * Last fragment of a multi-fragment packet. 1963 * 1964 * Since error already happened, this fragment 1965 * must be dropped as well as the fragment chain. 1966 */ 1967 re_setup_rxdesc(sc, i); 1968 re_free_rxchain(sc); 1969 sc->re_flags &= ~RE_F_DROP_RXFRAG; 1970 continue; 1971 } 1972 1973 /* 1974 * NOTE: for the 8139C+, the frame length field 1975 * is always 12 bits in size, but for the gigE chips, 1976 * it is 13 bits (since the max RX frame length is 16K). 1977 * Unfortunately, all 32 bits in the status word 1978 * were already used, so to make room for the extra 1979 * length bit, RealTek took out the 'frame alignment 1980 * error' bit and shifted the other status bits 1981 * over one slot. The OWN, EOR, FS and LS bits are 1982 * still in the same places. We have already extracted 1983 * the frame length and checked the OWN bit, so rather 1984 * than using an alternate bit mapping, we shift the 1985 * status bits one space to the right so we can evaluate 1986 * them using the 8169 status as though it was in the 1987 * same format as that of the 8139C+. 1988 */ 1989 if (!RE_IS_8139CP(sc)) 1990 rxstat >>= 1; 1991 1992 if (rxstat & RE_RDESC_STAT_RXERRSUM) { 1993 ifp->if_ierrors++; 1994 /* 1995 * If this is part of a multi-fragment packet, 1996 * discard all the pieces. 1997 */ 1998 re_free_rxchain(sc); 1999 re_setup_rxdesc(sc, i); 2000 continue; 2001 } 2002 2003 /* 2004 * If allocating a replacement mbuf fails, 2005 * reload the current one. 2006 */ 2007 2008 if (sc->re_newbuf(sc, i, 0)) { 2009 ifp->if_ierrors++; 2010 continue; 2011 } 2012 2013 if (sc->re_head != NULL) { 2014 m->m_len = total_len % MCLBYTES; 2015 /* 2016 * Special case: if there's 4 bytes or less 2017 * in this buffer, the mbuf can be discarded: 2018 * the last 4 bytes is the CRC, which we don't 2019 * care about anyway. 2020 */ 2021 if (m->m_len <= ETHER_CRC_LEN) { 2022 sc->re_tail->m_len -= 2023 (ETHER_CRC_LEN - m->m_len); 2024 m_freem(m); 2025 } else { 2026 m->m_len -= ETHER_CRC_LEN; 2027 sc->re_tail->m_next = m; 2028 } 2029 m = sc->re_head; 2030 sc->re_head = sc->re_tail = NULL; 2031 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2032 } else { 2033 m->m_pkthdr.len = m->m_len = 2034 (total_len - ETHER_CRC_LEN); 2035 } 2036 2037 ifp->if_ipackets++; 2038 m->m_pkthdr.rcvif = ifp; 2039 2040 /* Do RX checksumming if enabled */ 2041 2042 if (ifp->if_capenable & IFCAP_RXCSUM) { 2043 uint8_t packet_type; 2044 2045 packet_type = re_packet_type(sc, rxstat, rxctrl); 2046 2047 /* Check IP header checksum */ 2048 if (packet_type & RE_IP4_PACKET) { 2049 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2050 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0) 2051 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2052 } 2053 2054 /* Check TCP/UDP checksum */ 2055 if (((packet_type & RE_TCP_PACKET) && 2056 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) || 2057 ((packet_type & RE_UDP_PACKET) && 2058 (rxstat & RE_RDESC_STAT_UDPSUMBAD) == 0)) { 2059 m->m_pkthdr.csum_flags |= 2060 CSUM_DATA_VALID|CSUM_PSEUDO_HDR| 2061 CSUM_FRAG_NOT_CHECKED; 2062 m->m_pkthdr.csum_data = 0xffff; 2063 } 2064 } 2065 2066 if (rxctrl & RE_RDESC_CTL_HASTAG) { 2067 m->m_flags |= M_VLANTAG; 2068 m->m_pkthdr.ether_vlantag = 2069 be16toh((rxctrl & RE_RDESC_CTL_TAGDATA)); 2070 } 2071 ether_input_chain(ifp, m, chain); 2072 } 2073 2074 ether_input_dispatch(chain); 2075 2076 /* Flush the RX DMA ring */ 2077 2078 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 2079 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE); 2080 2081 sc->re_ldata.re_rx_prodidx = i; 2082 2083 return rx; 2084 } 2085 2086 #undef RE_IP4_PACKET 2087 #undef RE_TCP_PACKET 2088 #undef RE_UDP_PACKET 2089 2090 static int 2091 re_tx_collect(struct re_softc *sc) 2092 { 2093 struct ifnet *ifp = &sc->arpcom.ac_if; 2094 uint32_t txstat; 2095 int idx, tx = 0; 2096 2097 /* Invalidate the TX descriptor list */ 2098 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 2099 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_POSTREAD); 2100 2101 for (idx = sc->re_ldata.re_tx_considx; 2102 sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt; 2103 RE_TXDESC_INC(sc, idx)) { 2104 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat); 2105 if (txstat & RE_TDESC_CMD_OWN) 2106 break; 2107 2108 tx = 1; 2109 2110 sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0; 2111 2112 /* 2113 * We only stash mbufs in the last descriptor 2114 * in a fragment chain, which also happens to 2115 * be the only place where the TX status bits 2116 * are valid. 2117 */ 2118 if (txstat & RE_TDESC_CMD_EOF) { 2119 m_freem(sc->re_ldata.re_tx_mbuf[idx]); 2120 sc->re_ldata.re_tx_mbuf[idx] = NULL; 2121 bus_dmamap_unload(sc->re_ldata.re_mtag, 2122 sc->re_ldata.re_tx_dmamap[idx]); 2123 if (txstat & (RE_TDESC_STAT_EXCESSCOL| 2124 RE_TDESC_STAT_COLCNT)) 2125 ifp->if_collisions++; 2126 if (txstat & RE_TDESC_STAT_TXERRSUM) 2127 ifp->if_oerrors++; 2128 else 2129 ifp->if_opackets++; 2130 } 2131 sc->re_ldata.re_tx_free++; 2132 } 2133 sc->re_ldata.re_tx_considx = idx; 2134 2135 return tx; 2136 } 2137 2138 static int 2139 re_txeof(struct re_softc *sc) 2140 { 2141 struct ifnet *ifp = &sc->arpcom.ac_if; 2142 int tx; 2143 2144 tx = re_tx_collect(sc); 2145 2146 /* There is enough free TX descs */ 2147 if (sc->re_ldata.re_tx_free > RE_TXDESC_SPARE) 2148 ifp->if_flags &= ~IFF_OACTIVE; 2149 2150 /* 2151 * Some chips will ignore a second TX request issued while an 2152 * existing transmission is in progress. If the transmitter goes 2153 * idle but there are still packets waiting to be sent, we need 2154 * to restart the channel here to flush them out. This only seems 2155 * to be required with the PCIe devices. 2156 */ 2157 if (sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt) 2158 CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START); 2159 else 2160 ifp->if_timer = 0; 2161 2162 return tx; 2163 } 2164 2165 static void 2166 re_tick(void *xsc) 2167 { 2168 struct re_softc *sc = xsc; 2169 2170 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer); 2171 re_tick_serialized(xsc); 2172 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer); 2173 } 2174 2175 static void 2176 re_tick_serialized(void *xsc) 2177 { 2178 struct re_softc *sc = xsc; 2179 struct ifnet *ifp = &sc->arpcom.ac_if; 2180 struct mii_data *mii; 2181 2182 ASSERT_SERIALIZED(ifp->if_serializer); 2183 2184 mii = device_get_softc(sc->re_miibus); 2185 mii_tick(mii); 2186 if (sc->re_flags & RE_F_LINKED) { 2187 if (!(mii->mii_media_status & IFM_ACTIVE)) 2188 sc->re_flags &= ~RE_F_LINKED; 2189 } else { 2190 if (mii->mii_media_status & IFM_ACTIVE && 2191 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2192 sc->re_flags |= RE_F_LINKED; 2193 if (!ifq_is_empty(&ifp->if_snd)) 2194 if_devstart(ifp); 2195 } 2196 } 2197 2198 callout_reset(&sc->re_timer, hz, re_tick, sc); 2199 } 2200 2201 #ifdef DEVICE_POLLING 2202 2203 static void 2204 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2205 { 2206 struct re_softc *sc = ifp->if_softc; 2207 2208 ASSERT_SERIALIZED(ifp->if_serializer); 2209 2210 switch(cmd) { 2211 case POLL_REGISTER: 2212 /* disable interrupts */ 2213 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 2214 break; 2215 2216 case POLL_DEREGISTER: 2217 /* enable interrupts */ 2218 re_setup_intr(sc, 1, sc->re_imtype); 2219 break; 2220 2221 default: 2222 sc->rxcycles = count; 2223 re_rxeof(sc); 2224 re_txeof(sc); 2225 2226 if (!ifq_is_empty(&ifp->if_snd)) 2227 if_devstart(ifp); 2228 2229 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2230 uint16_t status; 2231 2232 status = CSR_READ_2(sc, RE_ISR); 2233 if (status == 0xffff) 2234 return; 2235 if (status) 2236 CSR_WRITE_2(sc, RE_ISR, status); 2237 2238 /* 2239 * XXX check behaviour on receiver stalls. 2240 */ 2241 2242 if (status & RE_ISR_SYSTEM_ERR) 2243 re_init(sc); 2244 } 2245 break; 2246 } 2247 } 2248 #endif /* DEVICE_POLLING */ 2249 2250 static void 2251 re_intr(void *arg) 2252 { 2253 struct re_softc *sc = arg; 2254 struct ifnet *ifp = &sc->arpcom.ac_if; 2255 uint16_t status; 2256 int rx, tx; 2257 2258 ASSERT_SERIALIZED(ifp->if_serializer); 2259 2260 if ((sc->re_flags & RE_F_SUSPENDED) || 2261 (ifp->if_flags & IFF_RUNNING) == 0) 2262 return; 2263 2264 rx = tx = 0; 2265 for (;;) { 2266 status = CSR_READ_2(sc, RE_ISR); 2267 /* If the card has gone away the read returns 0xffff. */ 2268 if (status == 0xffff) 2269 break; 2270 if (status) 2271 CSR_WRITE_2(sc, RE_ISR, status); 2272 2273 if ((status & sc->re_intrs) == 0) 2274 break; 2275 2276 if (status & (sc->re_rx_ack | RE_ISR_RX_ERR)) 2277 rx |= re_rxeof(sc); 2278 2279 if (status & (sc->re_tx_ack | RE_ISR_TX_ERR)) 2280 tx |= re_txeof(sc); 2281 2282 if (status & RE_ISR_SYSTEM_ERR) 2283 re_init(sc); 2284 2285 if (status & RE_ISR_LINKCHG) { 2286 callout_stop(&sc->re_timer); 2287 re_tick_serialized(sc); 2288 } 2289 } 2290 2291 if (sc->re_imtype == RE_IMTYPE_SIM) { 2292 if ((sc->re_flags & RE_F_TIMER_INTR)) { 2293 if ((tx | rx) == 0) { 2294 /* 2295 * Nothing needs to be processed, fallback 2296 * to use TX/RX interrupts. 2297 */ 2298 re_setup_intr(sc, 1, RE_IMTYPE_NONE); 2299 2300 /* 2301 * Recollect, mainly to avoid the possible 2302 * race introduced by changing interrupt 2303 * masks. 2304 */ 2305 re_rxeof(sc); 2306 tx = re_txeof(sc); 2307 } else { 2308 CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */ 2309 } 2310 } else if (tx | rx) { 2311 /* 2312 * Assume that using simulated interrupt moderation 2313 * (hardware timer based) could reduce the interript 2314 * rate. 2315 */ 2316 re_setup_intr(sc, 1, RE_IMTYPE_SIM); 2317 } 2318 } 2319 2320 if (tx && !ifq_is_empty(&ifp->if_snd)) 2321 if_devstart(ifp); 2322 } 2323 2324 static int 2325 re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx0) 2326 { 2327 struct ifnet *ifp = &sc->arpcom.ac_if; 2328 struct mbuf *m; 2329 struct re_dmaload_arg arg; 2330 bus_dma_segment_t segs[RE_MAXSEGS]; 2331 bus_dmamap_t map; 2332 int error, maxsegs, idx, i; 2333 struct re_desc *d, *tx_ring; 2334 uint32_t cmd_csum, ctl_csum, vlantag; 2335 2336 KASSERT(sc->re_ldata.re_tx_free > RE_TXDESC_SPARE, 2337 ("not enough free TX desc\n")); 2338 2339 m = *m_head; 2340 map = sc->re_ldata.re_tx_dmamap[*idx0]; 2341 2342 /* 2343 * Set up checksum offload. Note: checksum offload bits must 2344 * appear in all descriptors of a multi-descriptor transmit 2345 * attempt. (This is according to testing done with an 8169 2346 * chip. I'm not sure if this is a requirement or a bug.) 2347 */ 2348 cmd_csum = ctl_csum = 0; 2349 if (m->m_pkthdr.csum_flags & CSUM_IP) { 2350 cmd_csum |= RE_TDESC_CMD_IPCSUM; 2351 ctl_csum |= RE_TDESC_CTL_IPCSUM; 2352 } 2353 if (m->m_pkthdr.csum_flags & CSUM_TCP) { 2354 cmd_csum |= RE_TDESC_CMD_TCPCSUM; 2355 ctl_csum |= RE_TDESC_CTL_TCPCSUM; 2356 } 2357 if (m->m_pkthdr.csum_flags & CSUM_UDP) { 2358 cmd_csum |= RE_TDESC_CMD_UDPCSUM; 2359 ctl_csum |= RE_TDESC_CTL_UDPCSUM; 2360 } 2361 2362 /* For MAC2 chips, csum flags are set on re_control */ 2363 if (sc->re_caps & RE_C_MAC2) 2364 cmd_csum = 0; 2365 else 2366 ctl_csum = 0; 2367 2368 if (m->m_pkthdr.len > sc->re_swcsum_lim && 2369 (m->m_pkthdr.csum_flags & (CSUM_DELAY_IP | CSUM_DELAY_DATA))) { 2370 struct ether_header *eh; 2371 struct ip *ip; 2372 u_short offset; 2373 2374 m = m_pullup(m, sizeof(struct ether_header *)); 2375 if (m == NULL) { 2376 *m_head = NULL; 2377 return ENOBUFS; 2378 } 2379 eh = mtod(m, struct ether_header *); 2380 2381 /* XXX */ 2382 if (eh->ether_type == ETHERTYPE_VLAN) 2383 offset = sizeof(struct ether_vlan_header); 2384 else 2385 offset = sizeof(struct ether_header); 2386 2387 m = m_pullup(m, offset + sizeof(struct ip *)); 2388 if (m == NULL) { 2389 *m_head = NULL; 2390 return ENOBUFS; 2391 } 2392 ip = (struct ip *)(mtod(m, uint8_t *) + offset); 2393 2394 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 2395 u_short csum; 2396 2397 offset += IP_VHL_HL(ip->ip_vhl) << 2; 2398 csum = in_cksum_skip(m, ntohs(ip->ip_len), offset); 2399 if (m->m_pkthdr.csum_flags & CSUM_UDP && csum == 0) 2400 csum = 0xffff; 2401 offset += m->m_pkthdr.csum_data; /* checksum offset */ 2402 *(u_short *)(m->m_data + offset) = csum; 2403 2404 m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 2405 } 2406 if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 2407 ip->ip_sum = 0; 2408 if (ip->ip_vhl == IP_VHL_BORING) { 2409 ip->ip_sum = in_cksum_hdr(ip); 2410 } else { 2411 ip->ip_sum = 2412 in_cksum(m, IP_VHL_HL(ip->ip_vhl) << 2); 2413 } 2414 m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP; 2415 } 2416 *m_head = m; /* 'm' may be changed by above two m_pullup() */ 2417 2418 /* Clear hardware CSUM flags */ 2419 cmd_csum = ctl_csum = 0; 2420 } 2421 2422 if ((sc->re_caps & RE_C_AUTOPAD) == 0) { 2423 /* 2424 * With some of the RealTek chips, using the checksum offload 2425 * support in conjunction with the autopadding feature results 2426 * in the transmission of corrupt frames. For example, if we 2427 * need to send a really small IP fragment that's less than 60 2428 * bytes in size, and IP header checksumming is enabled, the 2429 * resulting ethernet frame that appears on the wire will 2430 * have garbled payload. To work around this, if TX checksum 2431 * offload is enabled, we always manually pad short frames out 2432 * to the minimum ethernet frame size. 2433 * 2434 * Note: this appears unnecessary for TCP, and doing it for TCP 2435 * with PCIe adapters seems to result in bad checksums. 2436 */ 2437 if ((m->m_pkthdr.csum_flags & 2438 (CSUM_DELAY_IP | CSUM_DELAY_DATA)) && 2439 (m->m_pkthdr.csum_flags & CSUM_TCP) == 0 && 2440 m->m_pkthdr.len < RE_MIN_FRAMELEN) { 2441 error = m_devpad(m, RE_MIN_FRAMELEN); 2442 if (error) 2443 goto back; 2444 } 2445 } 2446 2447 vlantag = 0; 2448 if (m->m_flags & M_VLANTAG) { 2449 vlantag = htobe16(m->m_pkthdr.ether_vlantag) | 2450 RE_TDESC_CTL_INSTAG; 2451 } 2452 2453 maxsegs = sc->re_ldata.re_tx_free; 2454 if (maxsegs > RE_MAXSEGS) 2455 maxsegs = RE_MAXSEGS; 2456 2457 arg.re_nsegs = maxsegs; 2458 arg.re_segs = segs; 2459 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map, m, 2460 re_dma_map_desc, &arg, BUS_DMA_NOWAIT); 2461 if (error && error != EFBIG) { 2462 if_printf(ifp, "can't map mbuf (error %d)\n", error); 2463 goto back; 2464 } 2465 2466 /* 2467 * Too many segments to map, coalesce into a single mbuf 2468 */ 2469 if (!error && arg.re_nsegs == 0) { 2470 bus_dmamap_unload(sc->re_ldata.re_mtag, map); 2471 error = EFBIG; 2472 } 2473 if (error) { 2474 struct mbuf *m_new; 2475 2476 m_new = m_defrag(m, MB_DONTWAIT); 2477 if (m_new == NULL) { 2478 if_printf(ifp, "can't defrag TX mbuf\n"); 2479 error = ENOBUFS; 2480 goto back; 2481 } else { 2482 *m_head = m = m_new; 2483 } 2484 2485 arg.re_nsegs = maxsegs; 2486 arg.re_segs = segs; 2487 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map, m, 2488 re_dma_map_desc, &arg, 2489 BUS_DMA_NOWAIT); 2490 if (error || arg.re_nsegs == 0) { 2491 if (!error) { 2492 bus_dmamap_unload(sc->re_ldata.re_mtag, map); 2493 error = EFBIG; 2494 } 2495 if_printf(ifp, "can't map mbuf (error %d)\n", error); 2496 goto back; 2497 } 2498 } 2499 bus_dmamap_sync(sc->re_ldata.re_mtag, map, BUS_DMASYNC_PREWRITE); 2500 2501 /* 2502 * Map the segment array into descriptors. We also keep track 2503 * of the end of the ring and set the end-of-ring bits as needed, 2504 * and we set the ownership bits in all except the very first 2505 * descriptor, whose ownership bits will be turned on later. 2506 */ 2507 tx_ring = sc->re_ldata.re_tx_list; 2508 idx = *idx0; 2509 i = 0; 2510 for (;;) { 2511 uint32_t cmdstat; 2512 2513 d = &tx_ring[idx]; 2514 2515 cmdstat = segs[i].ds_len; 2516 d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr)); 2517 d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr)); 2518 if (i == 0) 2519 cmdstat |= RE_TDESC_CMD_SOF; 2520 else 2521 cmdstat |= RE_TDESC_CMD_OWN; 2522 if (idx == (sc->re_tx_desc_cnt - 1)) 2523 cmdstat |= RE_TDESC_CMD_EOR; 2524 d->re_cmdstat = htole32(cmdstat | cmd_csum); 2525 d->re_control = htole32(ctl_csum | vlantag); 2526 2527 i++; 2528 if (i == arg.re_nsegs) 2529 break; 2530 RE_TXDESC_INC(sc, idx); 2531 } 2532 d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF); 2533 2534 /* Transfer ownership of packet to the chip. */ 2535 d->re_cmdstat |= htole32(RE_TDESC_CMD_OWN); 2536 if (*idx0 != idx) 2537 tx_ring[*idx0].re_cmdstat |= htole32(RE_TDESC_CMD_OWN); 2538 2539 /* 2540 * Insure that the map for this transmission 2541 * is placed at the array index of the last descriptor 2542 * in this chain. 2543 */ 2544 sc->re_ldata.re_tx_dmamap[*idx0] = sc->re_ldata.re_tx_dmamap[idx]; 2545 sc->re_ldata.re_tx_dmamap[idx] = map; 2546 2547 sc->re_ldata.re_tx_mbuf[idx] = m; 2548 sc->re_ldata.re_tx_free -= arg.re_nsegs; 2549 2550 RE_TXDESC_INC(sc, idx); 2551 *idx0 = idx; 2552 back: 2553 if (error) { 2554 m_freem(m); 2555 *m_head = NULL; 2556 } 2557 return error; 2558 } 2559 2560 /* 2561 * Main transmit routine for C+ and gigE NICs. 2562 */ 2563 2564 static void 2565 re_start(struct ifnet *ifp) 2566 { 2567 struct re_softc *sc = ifp->if_softc; 2568 struct mbuf *m_head; 2569 int idx, need_trans, oactive, error; 2570 2571 ASSERT_SERIALIZED(ifp->if_serializer); 2572 2573 if ((sc->re_flags & RE_F_LINKED) == 0) { 2574 ifq_purge(&ifp->if_snd); 2575 return; 2576 } 2577 2578 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) 2579 return; 2580 2581 idx = sc->re_ldata.re_tx_prodidx; 2582 2583 need_trans = 0; 2584 oactive = 0; 2585 while (sc->re_ldata.re_tx_mbuf[idx] == NULL) { 2586 if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) { 2587 if (!oactive) { 2588 if (re_tx_collect(sc)) { 2589 oactive = 1; 2590 continue; 2591 } 2592 } 2593 ifp->if_flags |= IFF_OACTIVE; 2594 break; 2595 } 2596 2597 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2598 if (m_head == NULL) 2599 break; 2600 2601 error = re_encap(sc, &m_head, &idx); 2602 if (error) { 2603 /* m_head is freed by re_encap(), if we reach here */ 2604 ifp->if_oerrors++; 2605 2606 if (error == EFBIG && !oactive) { 2607 if (re_tx_collect(sc)) { 2608 oactive = 1; 2609 continue; 2610 } 2611 } 2612 ifp->if_flags |= IFF_OACTIVE; 2613 break; 2614 } 2615 2616 oactive = 0; 2617 need_trans = 1; 2618 2619 /* 2620 * If there's a BPF listener, bounce a copy of this frame 2621 * to him. 2622 */ 2623 ETHER_BPF_MTAP(ifp, m_head); 2624 } 2625 2626 if (!need_trans) 2627 return; 2628 2629 /* Flush the TX descriptors */ 2630 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 2631 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE); 2632 2633 sc->re_ldata.re_tx_prodidx = idx; 2634 2635 /* 2636 * RealTek put the TX poll request register in a different 2637 * location on the 8169 gigE chip. I don't know why. 2638 */ 2639 CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START); 2640 2641 /* 2642 * Set a timeout in case the chip goes out to lunch. 2643 */ 2644 ifp->if_timer = 5; 2645 } 2646 2647 static void 2648 re_init(void *xsc) 2649 { 2650 struct re_softc *sc = xsc; 2651 struct ifnet *ifp = &sc->arpcom.ac_if; 2652 struct mii_data *mii; 2653 int error, framelen; 2654 2655 ASSERT_SERIALIZED(ifp->if_serializer); 2656 2657 mii = device_get_softc(sc->re_miibus); 2658 2659 /* 2660 * Cancel pending I/O and free all RX/TX buffers. 2661 */ 2662 re_stop(sc); 2663 2664 if (sc->re_caps & RE_C_CONTIGRX) { 2665 if (ifp->if_mtu > ETHERMTU) { 2666 KKASSERT(sc->re_ldata.re_jbuf != NULL); 2667 sc->re_flags |= RE_F_USE_JPOOL; 2668 sc->re_rxbuf_size = RE_FRAMELEN_MAX; 2669 sc->re_newbuf = re_newbuf_jumbo; 2670 } else { 2671 sc->re_flags &= ~RE_F_USE_JPOOL; 2672 sc->re_rxbuf_size = MCLBYTES; 2673 sc->re_newbuf = re_newbuf_std; 2674 } 2675 } 2676 2677 /* 2678 * Adjust max read request size according to MTU. 2679 * Mainly to improve TX performance for common case (ETHERMTU). 2680 */ 2681 if (sc->re_caps & RE_C_PCIE) { 2682 if (ifp->if_mtu > ETHERMTU) { 2683 /* 2684 * 512 seems to be the only value that works 2685 * reliably with jumbo frame 2686 */ 2687 pcie_set_max_readrq(sc->re_dev, 2688 PCIEM_DEVCTL_MAX_READRQ_512); 2689 } else { 2690 pcie_set_max_readrq(sc->re_dev, 2691 PCIEM_DEVCTL_MAX_READRQ_4096); 2692 } 2693 } 2694 2695 /* 2696 * Enable C+ RX and TX mode, as well as VLAN stripping and 2697 * RX checksum offload. We must configure the C+ register 2698 * before all others. 2699 */ 2700 CSR_WRITE_2(sc, RE_CPLUS_CMD, RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB | 2701 RE_CPLUSCMD_PCI_MRW | 2702 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 2703 RE_CPLUSCMD_VLANSTRIP : 0) | 2704 (ifp->if_capenable & IFCAP_RXCSUM ? 2705 RE_CPLUSCMD_RXCSUM_ENB : 0)); 2706 2707 /* 2708 * Init our MAC address. Even though the chipset 2709 * documentation doesn't mention it, we need to enter "Config 2710 * register write enable" mode to modify the ID registers. 2711 */ 2712 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_WRITECFG); 2713 CSR_WRITE_4(sc, RE_IDR0, 2714 htole32(*(uint32_t *)(&sc->arpcom.ac_enaddr[0]))); 2715 CSR_WRITE_2(sc, RE_IDR4, 2716 htole16(*(uint16_t *)(&sc->arpcom.ac_enaddr[4]))); 2717 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF); 2718 2719 /* 2720 * For C+ mode, initialize the RX descriptors and mbufs. 2721 */ 2722 error = re_rx_list_init(sc); 2723 if (error) { 2724 re_stop(sc); 2725 return; 2726 } 2727 error = re_tx_list_init(sc); 2728 if (error) { 2729 re_stop(sc); 2730 return; 2731 } 2732 2733 /* 2734 * Load the addresses of the RX and TX lists into the chip. 2735 */ 2736 CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI, 2737 RE_ADDR_HI(sc->re_ldata.re_rx_list_addr)); 2738 CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO, 2739 RE_ADDR_LO(sc->re_ldata.re_rx_list_addr)); 2740 2741 CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI, 2742 RE_ADDR_HI(sc->re_ldata.re_tx_list_addr)); 2743 CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO, 2744 RE_ADDR_LO(sc->re_ldata.re_tx_list_addr)); 2745 2746 /* 2747 * Enable transmit and receive. 2748 */ 2749 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB); 2750 2751 /* 2752 * Set the initial TX and RX configuration. 2753 */ 2754 if (sc->re_flags & RE_F_TESTMODE) { 2755 if (!RE_IS_8139CP(sc)) 2756 CSR_WRITE_4(sc, RE_TXCFG, 2757 RE_TXCFG_CONFIG | RE_LOOPTEST_ON); 2758 else 2759 CSR_WRITE_4(sc, RE_TXCFG, 2760 RE_TXCFG_CONFIG | RE_LOOPTEST_ON_CPLUS); 2761 } else 2762 CSR_WRITE_4(sc, RE_TXCFG, RE_TXCFG_CONFIG); 2763 2764 framelen = RE_FRAMELEN(ifp->if_mtu); 2765 if (framelen < MCLBYTES) 2766 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, howmany(MCLBYTES, 128)); 2767 else 2768 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, howmany(framelen, 128)); 2769 2770 CSR_WRITE_4(sc, RE_RXCFG, RE_RXCFG_CONFIG); 2771 2772 /* 2773 * Program the multicast filter, if necessary. 2774 */ 2775 re_setmulti(sc); 2776 2777 #ifdef DEVICE_POLLING 2778 /* 2779 * Disable interrupts if we are polling. 2780 */ 2781 if (ifp->if_flags & IFF_POLLING) 2782 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 2783 else /* otherwise ... */ 2784 #endif /* DEVICE_POLLING */ 2785 /* 2786 * Enable interrupts. 2787 */ 2788 if (sc->re_flags & RE_F_TESTMODE) 2789 CSR_WRITE_2(sc, RE_IMR, 0); 2790 else 2791 re_setup_intr(sc, 1, sc->re_imtype); 2792 CSR_WRITE_2(sc, RE_ISR, sc->re_intrs); 2793 2794 /* Start RX/TX process. */ 2795 CSR_WRITE_4(sc, RE_MISSEDPKT, 0); 2796 2797 #ifdef notdef 2798 /* Enable receiver and transmitter. */ 2799 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB); 2800 #endif 2801 2802 /* 2803 * For 8169 gigE NICs, set the max allowed RX packet 2804 * size so we can receive jumbo frames. 2805 */ 2806 if (!RE_IS_8139CP(sc)) { 2807 if (sc->re_caps & RE_C_CONTIGRX) 2808 CSR_WRITE_2(sc, RE_MAXRXPKTLEN, sc->re_rxbuf_size); 2809 else 2810 CSR_WRITE_2(sc, RE_MAXRXPKTLEN, 16383); 2811 } 2812 2813 if (sc->re_flags & RE_F_TESTMODE) 2814 return; 2815 2816 mii_mediachg(mii); 2817 2818 CSR_WRITE_1(sc, RE_CFG1, RE_CFG1_DRVLOAD|RE_CFG1_FULLDUPLEX); 2819 2820 ifp->if_flags |= IFF_RUNNING; 2821 ifp->if_flags &= ~IFF_OACTIVE; 2822 2823 callout_reset(&sc->re_timer, hz, re_tick, sc); 2824 } 2825 2826 /* 2827 * Set media options. 2828 */ 2829 static int 2830 re_ifmedia_upd(struct ifnet *ifp) 2831 { 2832 struct re_softc *sc = ifp->if_softc; 2833 struct mii_data *mii; 2834 2835 ASSERT_SERIALIZED(ifp->if_serializer); 2836 2837 mii = device_get_softc(sc->re_miibus); 2838 mii_mediachg(mii); 2839 2840 return(0); 2841 } 2842 2843 /* 2844 * Report current media status. 2845 */ 2846 static void 2847 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2848 { 2849 struct re_softc *sc = ifp->if_softc; 2850 struct mii_data *mii; 2851 2852 ASSERT_SERIALIZED(ifp->if_serializer); 2853 2854 mii = device_get_softc(sc->re_miibus); 2855 2856 mii_pollstat(mii); 2857 ifmr->ifm_active = mii->mii_media_active; 2858 ifmr->ifm_status = mii->mii_media_status; 2859 } 2860 2861 static int 2862 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2863 { 2864 struct re_softc *sc = ifp->if_softc; 2865 struct ifreq *ifr = (struct ifreq *) data; 2866 struct mii_data *mii; 2867 int error = 0, mask; 2868 2869 ASSERT_SERIALIZED(ifp->if_serializer); 2870 2871 switch(command) { 2872 case SIOCSIFMTU: 2873 if (ifr->ifr_mtu > sc->re_maxmtu) { 2874 error = EINVAL; 2875 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2876 ifp->if_mtu = ifr->ifr_mtu; 2877 if (ifp->if_flags & IFF_RUNNING) 2878 ifp->if_init(sc); 2879 } 2880 break; 2881 2882 case SIOCSIFFLAGS: 2883 if (ifp->if_flags & IFF_UP) { 2884 if (ifp->if_flags & IFF_RUNNING) { 2885 if ((ifp->if_flags ^ sc->re_if_flags) & 2886 (IFF_PROMISC | IFF_ALLMULTI)) 2887 re_setmulti(sc); 2888 } else { 2889 re_init(sc); 2890 } 2891 } else if (ifp->if_flags & IFF_RUNNING) { 2892 re_stop(sc); 2893 } 2894 sc->re_if_flags = ifp->if_flags; 2895 break; 2896 2897 case SIOCADDMULTI: 2898 case SIOCDELMULTI: 2899 re_setmulti(sc); 2900 break; 2901 2902 case SIOCGIFMEDIA: 2903 case SIOCSIFMEDIA: 2904 mii = device_get_softc(sc->re_miibus); 2905 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2906 break; 2907 2908 case SIOCSIFCAP: 2909 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & 2910 ifp->if_capabilities; 2911 ifp->if_capenable ^= mask; 2912 2913 if (mask & IFCAP_HWCSUM) { 2914 if (ifp->if_capenable & IFCAP_TXCSUM) 2915 ifp->if_hwassist = RE_CSUM_FEATURES; 2916 else 2917 ifp->if_hwassist = 0; 2918 } 2919 if (mask && (ifp->if_flags & IFF_RUNNING)) 2920 re_init(sc); 2921 break; 2922 2923 default: 2924 error = ether_ioctl(ifp, command, data); 2925 break; 2926 } 2927 return(error); 2928 } 2929 2930 static void 2931 re_watchdog(struct ifnet *ifp) 2932 { 2933 struct re_softc *sc = ifp->if_softc; 2934 2935 ASSERT_SERIALIZED(ifp->if_serializer); 2936 2937 if_printf(ifp, "watchdog timeout\n"); 2938 2939 ifp->if_oerrors++; 2940 2941 re_txeof(sc); 2942 re_rxeof(sc); 2943 2944 re_init(sc); 2945 2946 if (!ifq_is_empty(&ifp->if_snd)) 2947 if_devstart(ifp); 2948 } 2949 2950 /* 2951 * Stop the adapter and free any mbufs allocated to the 2952 * RX and TX lists. 2953 */ 2954 static void 2955 re_stop(struct re_softc *sc) 2956 { 2957 struct ifnet *ifp = &sc->arpcom.ac_if; 2958 int i; 2959 2960 ASSERT_SERIALIZED(ifp->if_serializer); 2961 2962 /* Reset the adapter. */ 2963 re_reset(sc, ifp->if_flags & IFF_RUNNING); 2964 2965 ifp->if_timer = 0; 2966 callout_stop(&sc->re_timer); 2967 2968 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2969 sc->re_flags &= ~(RE_F_TIMER_INTR | RE_F_DROP_RXFRAG | RE_F_LINKED); 2970 2971 CSR_WRITE_1(sc, RE_COMMAND, 0x00); 2972 CSR_WRITE_2(sc, RE_IMR, 0x0000); 2973 CSR_WRITE_2(sc, RE_ISR, 0xFFFF); 2974 2975 re_free_rxchain(sc); 2976 2977 /* Free the TX list buffers. */ 2978 for (i = 0; i < sc->re_tx_desc_cnt; i++) { 2979 if (sc->re_ldata.re_tx_mbuf[i] != NULL) { 2980 bus_dmamap_unload(sc->re_ldata.re_mtag, 2981 sc->re_ldata.re_tx_dmamap[i]); 2982 m_freem(sc->re_ldata.re_tx_mbuf[i]); 2983 sc->re_ldata.re_tx_mbuf[i] = NULL; 2984 } 2985 } 2986 2987 /* Free the RX list buffers. */ 2988 for (i = 0; i < sc->re_rx_desc_cnt; i++) { 2989 if (sc->re_ldata.re_rx_mbuf[i] != NULL) { 2990 if ((sc->re_flags & RE_F_USE_JPOOL) == 0) { 2991 bus_dmamap_unload(sc->re_ldata.re_mtag, 2992 sc->re_ldata.re_rx_dmamap[i]); 2993 } 2994 m_freem(sc->re_ldata.re_rx_mbuf[i]); 2995 sc->re_ldata.re_rx_mbuf[i] = NULL; 2996 } 2997 } 2998 } 2999 3000 /* 3001 * Device suspend routine. Stop the interface and save some PCI 3002 * settings in case the BIOS doesn't restore them properly on 3003 * resume. 3004 */ 3005 static int 3006 re_suspend(device_t dev) 3007 { 3008 #ifndef BURN_BRIDGES 3009 int i; 3010 #endif 3011 struct re_softc *sc = device_get_softc(dev); 3012 struct ifnet *ifp = &sc->arpcom.ac_if; 3013 3014 lwkt_serialize_enter(ifp->if_serializer); 3015 3016 re_stop(sc); 3017 3018 #ifndef BURN_BRIDGES 3019 for (i = 0; i < 5; i++) 3020 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3021 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3022 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3023 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3024 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3025 #endif 3026 3027 sc->re_flags |= RE_F_SUSPENDED; 3028 3029 lwkt_serialize_exit(ifp->if_serializer); 3030 3031 return (0); 3032 } 3033 3034 /* 3035 * Device resume routine. Restore some PCI settings in case the BIOS 3036 * doesn't, re-enable busmastering, and restart the interface if 3037 * appropriate. 3038 */ 3039 static int 3040 re_resume(device_t dev) 3041 { 3042 struct re_softc *sc = device_get_softc(dev); 3043 struct ifnet *ifp = &sc->arpcom.ac_if; 3044 #ifndef BURN_BRIDGES 3045 int i; 3046 #endif 3047 3048 lwkt_serialize_enter(ifp->if_serializer); 3049 3050 #ifndef BURN_BRIDGES 3051 /* better way to do this? */ 3052 for (i = 0; i < 5; i++) 3053 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3054 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3055 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3056 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3057 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3058 3059 /* reenable busmastering */ 3060 pci_enable_busmaster(dev); 3061 pci_enable_io(dev, SYS_RES_IOPORT); 3062 #endif 3063 3064 /* reinitialize interface if necessary */ 3065 if (ifp->if_flags & IFF_UP) 3066 re_init(sc); 3067 3068 sc->re_flags &= ~RE_F_SUSPENDED; 3069 3070 lwkt_serialize_exit(ifp->if_serializer); 3071 3072 return (0); 3073 } 3074 3075 /* 3076 * Stop all chip I/O so that the kernel's probe routines don't 3077 * get confused by errant DMAs when rebooting. 3078 */ 3079 static void 3080 re_shutdown(device_t dev) 3081 { 3082 struct re_softc *sc = device_get_softc(dev); 3083 struct ifnet *ifp = &sc->arpcom.ac_if; 3084 3085 lwkt_serialize_enter(ifp->if_serializer); 3086 re_stop(sc); 3087 lwkt_serialize_exit(ifp->if_serializer); 3088 } 3089 3090 static int 3091 re_sysctl_rxtime(SYSCTL_HANDLER_ARGS) 3092 { 3093 struct re_softc *sc = arg1; 3094 3095 return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_rx_time); 3096 } 3097 3098 static int 3099 re_sysctl_txtime(SYSCTL_HANDLER_ARGS) 3100 { 3101 struct re_softc *sc = arg1; 3102 3103 return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_tx_time); 3104 } 3105 3106 static int 3107 re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *hwtime) 3108 { 3109 struct re_softc *sc = arg1; 3110 struct ifnet *ifp = &sc->arpcom.ac_if; 3111 int error, v; 3112 3113 lwkt_serialize_enter(ifp->if_serializer); 3114 3115 v = *hwtime; 3116 error = sysctl_handle_int(oidp, &v, 0, req); 3117 if (error || req->newptr == NULL) 3118 goto back; 3119 3120 if (v <= 0) { 3121 error = EINVAL; 3122 goto back; 3123 } 3124 3125 if (v != *hwtime) { 3126 *hwtime = v; 3127 3128 if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) == 3129 IFF_RUNNING && sc->re_imtype == RE_IMTYPE_HW) 3130 re_setup_hw_im(sc); 3131 } 3132 back: 3133 lwkt_serialize_exit(ifp->if_serializer); 3134 return error; 3135 } 3136 3137 static int 3138 re_sysctl_simtime(SYSCTL_HANDLER_ARGS) 3139 { 3140 struct re_softc *sc = arg1; 3141 struct ifnet *ifp = &sc->arpcom.ac_if; 3142 int error, v; 3143 3144 lwkt_serialize_enter(ifp->if_serializer); 3145 3146 v = sc->re_sim_time; 3147 error = sysctl_handle_int(oidp, &v, 0, req); 3148 if (error || req->newptr == NULL) 3149 goto back; 3150 3151 if (v <= 0) { 3152 error = EINVAL; 3153 goto back; 3154 } 3155 3156 if (v != sc->re_sim_time) { 3157 sc->re_sim_time = v; 3158 3159 if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) == 3160 IFF_RUNNING && sc->re_imtype == RE_IMTYPE_SIM) { 3161 #ifdef foo 3162 int reg; 3163 3164 /* 3165 * Following code causes various strange 3166 * performance problems. Hmm ... 3167 */ 3168 CSR_WRITE_2(sc, RE_IMR, 0); 3169 if (!RE_IS_8139CP(sc)) 3170 reg = RE_TIMERINT_8169; 3171 else 3172 reg = RE_TIMERINT; 3173 CSR_WRITE_4(sc, reg, 0); 3174 CSR_READ_4(sc, reg); /* flush */ 3175 3176 CSR_WRITE_2(sc, RE_IMR, sc->re_intrs); 3177 re_setup_sim_im(sc); 3178 #else 3179 re_setup_intr(sc, 0, RE_IMTYPE_NONE); 3180 DELAY(10); 3181 re_setup_intr(sc, 1, RE_IMTYPE_SIM); 3182 #endif 3183 } 3184 } 3185 back: 3186 lwkt_serialize_exit(ifp->if_serializer); 3187 return error; 3188 } 3189 3190 static int 3191 re_sysctl_imtype(SYSCTL_HANDLER_ARGS) 3192 { 3193 struct re_softc *sc = arg1; 3194 struct ifnet *ifp = &sc->arpcom.ac_if; 3195 int error, v; 3196 3197 lwkt_serialize_enter(ifp->if_serializer); 3198 3199 v = sc->re_imtype; 3200 error = sysctl_handle_int(oidp, &v, 0, req); 3201 if (error || req->newptr == NULL) 3202 goto back; 3203 3204 if (v != RE_IMTYPE_HW && v != RE_IMTYPE_SIM && v != RE_IMTYPE_NONE) { 3205 error = EINVAL; 3206 goto back; 3207 } 3208 if (v == RE_IMTYPE_HW && (sc->re_caps & RE_C_HWIM) == 0) { 3209 /* Can't do hardware interrupt moderation */ 3210 error = EOPNOTSUPP; 3211 goto back; 3212 } 3213 3214 if (v != sc->re_imtype) { 3215 sc->re_imtype = v; 3216 if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) == 3217 IFF_RUNNING) 3218 re_setup_intr(sc, 1, sc->re_imtype); 3219 } 3220 back: 3221 lwkt_serialize_exit(ifp->if_serializer); 3222 return error; 3223 } 3224 3225 static void 3226 re_setup_hw_im(struct re_softc *sc) 3227 { 3228 KKASSERT(sc->re_caps & RE_C_HWIM); 3229 3230 /* 3231 * Interrupt moderation 3232 * 3233 * 0xABCD 3234 * A - unknown (maybe TX related) 3235 * B - TX timer (unit: 25us) 3236 * C - unknown (maybe RX related) 3237 * D - RX timer (unit: 25us) 3238 * 3239 * 3240 * re(4)'s interrupt moderation is actually controlled by 3241 * two variables, like most other NICs (bge, bce etc.) 3242 * o timer 3243 * o number of packets [P] 3244 * 3245 * The logic relationship between these two variables is 3246 * similar to other NICs too: 3247 * if (timer expire || packets > [P]) 3248 * Interrupt is delivered 3249 * 3250 * Currently we only know how to set 'timer', but not 3251 * 'number of packets', which should be ~30, as far as I 3252 * tested (sink ~900Kpps, interrupt rate is 30KHz) 3253 */ 3254 CSR_WRITE_2(sc, RE_IM, 3255 RE_IM_RXTIME(sc->re_rx_time) | 3256 RE_IM_TXTIME(sc->re_tx_time) | 3257 RE_IM_MAGIC); 3258 } 3259 3260 static void 3261 re_disable_hw_im(struct re_softc *sc) 3262 { 3263 if (sc->re_caps & RE_C_HWIM) 3264 CSR_WRITE_2(sc, RE_IM, 0); 3265 } 3266 3267 static void 3268 re_setup_sim_im(struct re_softc *sc) 3269 { 3270 if (!RE_IS_8139CP(sc)) { 3271 uint32_t ticks; 3272 3273 /* 3274 * Datasheet says tick decreases at bus speed, 3275 * but it seems the clock runs a little bit 3276 * faster, so we do some compensation here. 3277 */ 3278 ticks = (sc->re_sim_time * sc->re_bus_speed * 8) / 5; 3279 CSR_WRITE_4(sc, RE_TIMERINT_8169, ticks); 3280 } else { 3281 CSR_WRITE_4(sc, RE_TIMERINT, 0x400); /* XXX */ 3282 } 3283 CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */ 3284 sc->re_flags |= RE_F_TIMER_INTR; 3285 } 3286 3287 static void 3288 re_disable_sim_im(struct re_softc *sc) 3289 { 3290 if (!RE_IS_8139CP(sc)) 3291 CSR_WRITE_4(sc, RE_TIMERINT_8169, 0); 3292 else 3293 CSR_WRITE_4(sc, RE_TIMERINT, 0); 3294 sc->re_flags &= ~RE_F_TIMER_INTR; 3295 } 3296 3297 static void 3298 re_config_imtype(struct re_softc *sc, int imtype) 3299 { 3300 switch (imtype) { 3301 case RE_IMTYPE_HW: 3302 KKASSERT(sc->re_caps & RE_C_HWIM); 3303 /* FALL THROUGH */ 3304 case RE_IMTYPE_NONE: 3305 sc->re_intrs = RE_INTRS; 3306 sc->re_rx_ack = RE_ISR_RX_OK | RE_ISR_FIFO_OFLOW | 3307 RE_ISR_RX_OVERRUN; 3308 sc->re_tx_ack = RE_ISR_TX_OK; 3309 break; 3310 3311 case RE_IMTYPE_SIM: 3312 sc->re_intrs = RE_INTRS_TIMER; 3313 sc->re_rx_ack = RE_ISR_TIMEOUT_EXPIRED; 3314 sc->re_tx_ack = RE_ISR_TIMEOUT_EXPIRED; 3315 break; 3316 3317 default: 3318 panic("%s: unknown imtype %d\n", 3319 sc->arpcom.ac_if.if_xname, imtype); 3320 } 3321 } 3322 3323 static void 3324 re_setup_intr(struct re_softc *sc, int enable_intrs, int imtype) 3325 { 3326 re_config_imtype(sc, imtype); 3327 3328 if (enable_intrs) 3329 CSR_WRITE_2(sc, RE_IMR, sc->re_intrs); 3330 else 3331 CSR_WRITE_2(sc, RE_IMR, 0); 3332 3333 switch (imtype) { 3334 case RE_IMTYPE_NONE: 3335 re_disable_sim_im(sc); 3336 re_disable_hw_im(sc); 3337 break; 3338 3339 case RE_IMTYPE_HW: 3340 KKASSERT(sc->re_caps & RE_C_HWIM); 3341 re_disable_sim_im(sc); 3342 re_setup_hw_im(sc); 3343 break; 3344 3345 case RE_IMTYPE_SIM: 3346 re_disable_hw_im(sc); 3347 re_setup_sim_im(sc); 3348 break; 3349 3350 default: 3351 panic("%s: unknown imtype %d\n", 3352 sc->arpcom.ac_if.if_xname, imtype); 3353 } 3354 } 3355 3356 static void 3357 re_get_eaddr(struct re_softc *sc, uint8_t *eaddr) 3358 { 3359 int i; 3360 3361 if (sc->re_macver == RE_MACVER_11 || sc->re_macver == RE_MACVER_12) { 3362 uint16_t re_did; 3363 3364 re_get_eewidth(sc); 3365 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 3366 if (re_did == 0x8128) { 3367 uint16_t as[ETHER_ADDR_LEN / 2]; 3368 3369 /* 3370 * Get station address from the EEPROM. 3371 */ 3372 re_read_eeprom(sc, (caddr_t)as, RE_EE_EADDR, 3); 3373 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 3374 as[i] = le16toh(as[i]); 3375 bcopy(as, eaddr, sizeof(eaddr)); 3376 return; 3377 } 3378 } 3379 3380 /* 3381 * Get station address from IDRx. 3382 */ 3383 for (i = 0; i < ETHER_ADDR_LEN; ++i) 3384 eaddr[i] = CSR_READ_1(sc, RE_IDR0 + i); 3385 } 3386 3387 static int 3388 re_jpool_alloc(struct re_softc *sc) 3389 { 3390 struct re_list_data *ldata = &sc->re_ldata; 3391 struct re_jbuf *jbuf; 3392 bus_addr_t paddr; 3393 bus_size_t jpool_size; 3394 caddr_t buf; 3395 int i, error; 3396 3397 lwkt_serialize_init(&ldata->re_jbuf_serializer); 3398 3399 ldata->re_jbuf = kmalloc(sizeof(struct re_jbuf) * RE_JBUF_COUNT(sc), 3400 M_DEVBUF, M_WAITOK | M_ZERO); 3401 3402 jpool_size = RE_JBUF_COUNT(sc) * RE_JBUF_SIZE; 3403 3404 error = bus_dma_tag_create(sc->re_parent_tag, 3405 RE_BUF_ALIGN, 0, /* alignment, boundary */ 3406 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 3407 BUS_SPACE_MAXADDR, /* highaddr */ 3408 NULL, NULL, /* filter, filterarg */ 3409 jpool_size, 1, /* nsegments, maxsize */ 3410 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 3411 BUS_DMA_ALLOCNOW, /* flags */ 3412 &ldata->re_jpool_tag); 3413 if (error) { 3414 device_printf(sc->re_dev, "could not allocate jumbo dma tag\n"); 3415 return error; 3416 } 3417 3418 error = bus_dmamem_alloc(ldata->re_jpool_tag, (void **)&ldata->re_jpool, 3419 BUS_DMA_WAITOK, &ldata->re_jpool_map); 3420 if (error) { 3421 device_printf(sc->re_dev, 3422 "could not allocate jumbo dma memory\n"); 3423 bus_dma_tag_destroy(ldata->re_jpool_tag); 3424 ldata->re_jpool_tag = NULL; 3425 return error; 3426 } 3427 3428 error = bus_dmamap_load(ldata->re_jpool_tag, ldata->re_jpool_map, 3429 ldata->re_jpool, jpool_size, 3430 re_dma_map_addr, &paddr, BUS_DMA_WAITOK); 3431 if (error) { 3432 device_printf(sc->re_dev, "could not load jumbo dma map\n"); 3433 bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool, 3434 ldata->re_jpool_map); 3435 bus_dma_tag_destroy(ldata->re_jpool_tag); 3436 ldata->re_jpool_tag = NULL; 3437 return error; 3438 } 3439 3440 /* ..and split it into 9KB chunks */ 3441 SLIST_INIT(&ldata->re_jbuf_free); 3442 3443 buf = ldata->re_jpool; 3444 for (i = 0; i < RE_JBUF_COUNT(sc); i++) { 3445 jbuf = &ldata->re_jbuf[i]; 3446 3447 jbuf->re_sc = sc; 3448 jbuf->re_inuse = 0; 3449 jbuf->re_slot = i; 3450 jbuf->re_buf = buf; 3451 jbuf->re_paddr = paddr; 3452 3453 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link); 3454 3455 buf += RE_JBUF_SIZE; 3456 paddr += RE_JBUF_SIZE; 3457 } 3458 return 0; 3459 } 3460 3461 static void 3462 re_jpool_free(struct re_softc *sc) 3463 { 3464 struct re_list_data *ldata = &sc->re_ldata; 3465 3466 if (ldata->re_jpool_tag != NULL) { 3467 bus_dmamap_unload(ldata->re_jpool_tag, ldata->re_jpool_map); 3468 bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool, 3469 ldata->re_jpool_map); 3470 bus_dma_tag_destroy(ldata->re_jpool_tag); 3471 ldata->re_jpool_tag = NULL; 3472 } 3473 3474 if (ldata->re_jbuf != NULL) { 3475 kfree(ldata->re_jbuf, M_DEVBUF); 3476 ldata->re_jbuf = NULL; 3477 } 3478 } 3479 3480 static struct re_jbuf * 3481 re_jbuf_alloc(struct re_softc *sc) 3482 { 3483 struct re_list_data *ldata = &sc->re_ldata; 3484 struct re_jbuf *jbuf; 3485 3486 lwkt_serialize_enter(&ldata->re_jbuf_serializer); 3487 3488 jbuf = SLIST_FIRST(&ldata->re_jbuf_free); 3489 if (jbuf != NULL) { 3490 SLIST_REMOVE_HEAD(&ldata->re_jbuf_free, re_link); 3491 jbuf->re_inuse = 1; 3492 } 3493 3494 lwkt_serialize_exit(&ldata->re_jbuf_serializer); 3495 3496 return jbuf; 3497 } 3498 3499 static void 3500 re_jbuf_free(void *arg) 3501 { 3502 struct re_jbuf *jbuf = arg; 3503 struct re_softc *sc = jbuf->re_sc; 3504 struct re_list_data *ldata = &sc->re_ldata; 3505 3506 if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) { 3507 panic("%s: free wrong jumbo buffer\n", 3508 sc->arpcom.ac_if.if_xname); 3509 } else if (jbuf->re_inuse == 0) { 3510 panic("%s: jumbo buffer already freed\n", 3511 sc->arpcom.ac_if.if_xname); 3512 } 3513 3514 lwkt_serialize_enter(&ldata->re_jbuf_serializer); 3515 atomic_subtract_int(&jbuf->re_inuse, 1); 3516 if (jbuf->re_inuse == 0) 3517 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link); 3518 lwkt_serialize_exit(&ldata->re_jbuf_serializer); 3519 } 3520 3521 static void 3522 re_jbuf_ref(void *arg) 3523 { 3524 struct re_jbuf *jbuf = arg; 3525 struct re_softc *sc = jbuf->re_sc; 3526 struct re_list_data *ldata = &sc->re_ldata; 3527 3528 if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) { 3529 panic("%s: ref wrong jumbo buffer\n", 3530 sc->arpcom.ac_if.if_xname); 3531 } else if (jbuf->re_inuse == 0) { 3532 panic("%s: jumbo buffer already freed\n", 3533 sc->arpcom.ac_if.if_xname); 3534 } 3535 atomic_add_int(&jbuf->re_inuse, 1); 3536 } 3537