/*	$OpenBSD: re.c,v 1.207 2020/08/26 03:29:06 visa Exp $	*/
/*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support Realtek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family; however, it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the max MTU possible with this
 * driver is 7422 bytes.
 */
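/*
 * A quick sanity check on the jumbo limit above: 7440 bytes of frame
 * minus ETHER_HDR_LEN (14) and ETHER_CRC_LEN (4) leaves exactly 7422
 * bytes of payload, which is presumably where the RL_JUMBO_MTU_7K
 * limit applied to the 8169/8169S/8110S in re_attach() comes from.
 */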
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, unsigned int, struct mbuf *);

int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifqueue *);
void	re_txstart(void *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_set_jumbo(struct rl_softc *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);
int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);
#ifndef SMALL_KERNEL
int	re_wol(struct ifnet *, int);
#endif

void	in_delayed_cksum(struct mbuf *);

struct cfdriver re_cd = {
	NULL, "re", DV_IFNET
};

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

#define RL_FRAMELEN(mtu)				\
	(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +		\
	 ETHER_VLAN_ENCAP_LEN)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E,	"RTL8100E" },
	{ RL_HWREV_8100E_SPIN2,	"RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8106E,	"RTL8106E" },
	{ RL_HWREV_8401E,	"RTL8401E" },
	{ RL_HWREV_8402,	"RTL8402" },
	{ RL_HWREV_8411,	"RTL8411" },
	{ RL_HWREV_8411B,	"RTL8411B" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RL_HWREV_8103E,	"RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168B_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168B_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168B_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
	{ RL_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RL_HWREV_8168H,	"RTL8168H/8111H" },
	{ RL_HWREV_8105E,	"RTL8105E" },
	{ RL_HWREV_8105E_SPIN1,	"RTL8105E" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RL_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};


static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
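/*
 * A sketch of the command format above, assuming rl_eewidth is 6 for
 * a 93C46 part and that RL_9346_READ encodes the start bit plus the
 * read opcode: for address 0x07 the command word is
 * (RL_9346_READ << 6) | 0x07, and the loop clocks it out MSB first
 * starting at bit 1 << (6 + 3), one DATAIN bit per CLK strobe.
 */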
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)self;
	u_int32_t	rval;
	int		i;

	if (phy != 7)
		return (0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int32_t	rval;
	int		i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);

	DELAY(20);
}

int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}

void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return;
	}
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		/* 8139C+ has different bit layout. */
		data &= ~(BMCR_LOOP | BMCR_ISO);
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

void
re_miibus_statchg(struct device *dev)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct mii_data	*mii = &sc->sc_mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Realtek controllers do not provide an interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}
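/*
 * The filter program below uses the usual 64-bit multicast hash: the
 * top six bits of the big-endian CRC32 of each address pick one bit
 * across the RL_MAR0/RL_MAR4 pair.  For example, a CRC of 0xFC000000
 * yields h == 0x3F, which sets bit 31 of hashes[1].
 */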
void
re_iff(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	u_int32_t		rxfilt;
	struct arpcom		*ac = &sc->sc_arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * For some unfathomable reason, Realtek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if (sc->rl_flags & RL_FLAG_PCIE) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

void
re_reset(struct rl_softc *sc)
{
	int	i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	if (sc->rl_flags & RL_FLAG_MACRESET)
		CSR_WRITE_1(sc, RL_LDPS, 1);
}
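/*
 * On the parts flagged RL_FLAG_MACRESET (the 8169/8169S/8110S class),
 * the RL_LDPS write at the end of re_reset() finishes the job;
 * presumably it kicks the MAC out of its link-down power-saving state,
 * without which the reset command alone is not sufficient.
 */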
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	const struct re_revision *rr;
	const char	*re_name = NULL;

	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

	switch (sc->sc_hwrev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8100E_SPIN2:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E_VL:
		sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168G:
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
	case RL_HWREV_8411B:
		if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
			/* RTL8106EUS */
			sc->rl_flags |= RL_FLAG_FASTETHER;
			sc->rl_max_mtu = RL_MTU;
		} else {
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
			sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		}

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SCd:
	case RL_HWREV_8169_8110SCe:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		sc->rl_max_mtu = RL_JUMBO_MTU_7K;
		break;
	default:
		break;
	}

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	re_reset(sc);

	sc->rl_tx_time = 5;		/* 125us */
	sc->rl_rx_time = 2;		/* 50us */
	if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_sim_time = 75;	/* 75us */
	else
		sc->rl_sim_time = 125;	/* 125us */
	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		sc->rl_bus_speed = 33; /* XXX */
	else if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_bus_speed = 125;
	else {
		u_int8_t cfg2;

		cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
		switch (cfg2 & RL_CFG2_PCI_MASK) {
		case RL_CFG2_PCI_33MHZ:
			sc->rl_bus_speed = 33;
			break;
		case RL_CFG2_PCI_66MHZ:
			sc->rl_bus_speed = 66;
			break;
		default:
			printf("%s: unknown bus speed, assume 33MHz\n",
			    sc->sc_dev.dv_xname);
			sc->rl_bus_speed = 33;
			break;
		}

		if (cfg2 & RL_CFG2_PCI_64BIT)
			sc->rl_flags |= RL_FLAG_PCI64;
	}

	re_config_imtype(sc, sc->rl_imtype);

	if (sc->rl_flags & RL_FLAG_PAR) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = letoh16(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	/*
	 * Set RX length mask, TX poll request register
	 * and descriptor count.
	 */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == sc->sc_hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));
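	/*
	 * The ring setup below is the standard bus_dma(9) four-step:
	 * bus_dmamem_alloc() gets raw DMA-safe pages, bus_dmamem_map()
	 * gives the kernel a mapping of them, bus_dmamap_create() makes
	 * the map object, and bus_dmamap_load() resolves the physical
	 * address that the chip will be handed.  The fail_* labels at
	 * the bottom unwind these steps in exactly the reverse order.
	 */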
	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
	    RL_TX_LIST_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs,
		    RL_JUMBO_FRAMELEN, 0, 0,
		    &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate rx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
	    RL_RX_DMAMEM_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_FRAMELEN(sc->rl_max_mtu), 1,
		    RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
		    &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}
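	/*
	 * Each RX map above covers a single RL_FRAMELEN(max MTU) segment;
	 * frames that span more than one buffer are stitched back
	 * together in re_rxeof() via the rl_head/rl_tail mbuf chain.
	 */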
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = re_ioctl;
	ifp->if_qstart = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_hardmtu = sc->rl_max_mtu;
	ifq_set_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

	/*
	 * The RTL8168/8111C generates frames with a bad IP checksum
	 * if the packet has IP options, so disable TX IP checksum
	 * offloading on those revisions.
	 */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
		break;
	default:
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = re_wol;
	re_wol(ifp, 0);
#endif
	timeout_set(&sc->timer_handle, re_tick, sc);
	task_set(&sc->rl_start, re_txstart, sc);

	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (sc->sc_hwrev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return (0);

fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RL_FRAMELEN(sc->rl_max_mtu));
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}


int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2,
	    sc->rl_ldata.rl_rx_desc_cnt - 1);
	re_rx_list_fill(sc);

	return (0);
}

void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
	    sc->rl_ldata.rl_rx_desc_cnt);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}
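/*
 * Note the two-step cmdstat update in re_newbuf() above: the descriptor
 * is first written and synced without RL_RDESC_CMD_OWN, and only then
 * is OWN set and synced, so the chip can never observe a descriptor it
 * owns whose length and buffer address are still in flight.
 */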
/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			m_freem(m);
			continue;
		} else if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, Realtek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			m_freem(m);
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
			if (m->m_len == 0)
				m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring);

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);


	return (rx);
}

int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct rl_txq	*txq;
	uint32_t	txstat;
	unsigned int	prod, cons;
	unsigned int	idx;
	int		free = 0;

	prod = sc->rl_ldata.rl_txq_prodidx;
	cons = sc->rl_ldata.rl_txq_considx;

	while (prod != cons) {
		txq = &sc->rl_ldata.rl_txq[cons];

		idx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
		txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;

		cons = RL_NEXT_TX_DESC(sc, idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rl_ldata.rl_txq_considx = cons;
	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->rl_start);
	else
		ifp->if_timer = 0;

	return (1);
}

void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	int s;

	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);

	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(&sc->sc_dev);

	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status &
		    (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			re_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if (sc->rl_timerintr) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fall back
				 * to using TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}
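/*
 * The moderation logic in re_intr() is adaptive: a timer interrupt
 * that finds no RX/TX work pending drops back to plain per-packet
 * interrupts, while work arriving outside the timer re-arms the
 * simulated moderation, so an idle link pays no added latency and a
 * busy one gets its completions batched.
 */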
int
re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
{
	struct rl_txq	*txq;
	bus_dmamap_t	map;
	int		error, seg, nsegs, curidx, lastidx, pad;
	int		off;
	struct ip	*ip;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take effect.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
	    m->m_pkthdr.len > RL_MTU &&
	    (m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		struct mbuf mh, *mp;

		mp = m_getptr(m, ETHER_HDR_LEN, &off);
		mh.m_flags = 0;
		mh.m_data = mtod(mp, caddr_t) + off;
		mh.m_next = mp->m_next;
		mh.m_pkthdr.len = m->m_pkthdr.len - ETHER_HDR_LEN;
		mh.m_len = mp->m_len - off;
		ip = (struct ip *)mh.m_data;

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
			in_delayed_cksum(&mh);

		m->m_pkthdr.csum_flags &=
		    ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
	}

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[idx];
	map = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = map->dm_nsegs;
	pad = 0;

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
		pad = 1;
		nsegs++;
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	curidx = idx;
	cmdstat = RL_TDESC_CMD_SOF;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
		cmdstat = RL_TDESC_CMD_OWN;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}

	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
	}

	/* d is already pointing at the last descriptor */
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rl_ldata.rl_tx_list[idx];

	RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTWRITE);
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;

	return (nsegs);
}

void
re_txstart(void *xsc)
{
	struct rl_softc	*sc = xsc;

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
}
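/*
 * Ownership ordering in re_encap() above is deliberate: every
 * descriptor after the first is handed to the chip (OWN) as it is
 * written, EOF goes on the last one, and only then does the first
 * descriptor receive OWN.  Setting that one bit last is what
 * atomically publishes the whole chain to the hardware.
 */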
/*
 * Main transmit routine for C+ and gigE NICs.
 */

void
re_start(struct ifqueue *ifq)
{
	struct ifnet	*ifp = ifq->ifq_if;
	struct rl_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	unsigned int	idx;
	unsigned int	free, used;
	int		post = 0;

	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
		ifq_purge(ifq);
		return;
	}

	free = sc->rl_ldata.rl_txq_considx;
	idx = sc->rl_ldata.rl_txq_prodidx;
	if (free <= idx)
		free += sc->rl_ldata.rl_tx_desc_cnt;
	free -= idx;

	for (;;) {
		if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = re_encap(sc, idx, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		KASSERT(used <= free);
		free -= used;

		idx += used;
		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
			idx -= sc->rl_ldata.rl_tx_desc_cnt;

		post = 1;
	}

	if (post == 0)
		return;

	ifp->if_timer = 5;
	sc->rl_ldata.rl_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->rl_start);
}

int
re_init(struct ifnet *ifp)
{
	struct rl_softc	*sc = ifp->if_softc;
	u_int16_t	cfg;
	uint32_t	rxcfg;
	int		s;
	union {
		u_int32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(ifp);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
	    RL_CPLUSCMD_RXCSUM_ENB;

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		cfg |= RL_CPLUSCMD_VLANSTRIP;

	if (sc->rl_flags & RL_FLAG_MACSTAT)
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
	else
		cfg |= RL_CPLUSCMD_RXENB;

	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	/*
	 * Default on PC Engines APU1 is to have all LEDs off unless
	 * there is network activity. Override to provide a link status
	 * LED.
	 */
	if (sc->sc_hwrev == RL_HWREV_8168E &&
	    hw_vendor != NULL && hw_prod != NULL &&
	    strcmp(hw_vendor, "PC Engines") == 0 &&
	    strcmp(hw_prod, "APU") == 0) {
		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
	}
	/*
	 * Protect the config registers again.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		re_set_jumbo(sc);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);
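	/*
	 * Note that the ring bases below are written as explicit hi/lo
	 * register halves (RL_ADDR_HI/RL_ADDR_LO), mirroring
	 * re_set_bufaddr() near the top of the file, so the same code
	 * path works whether bus_addr_t is 32 or 64 bits wide.
	 */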
1915 */ 1916 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1917 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1918 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1919 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1920 1921 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1922 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1923 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1924 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1925 1926 if (sc->rl_flags & RL_FLAG_RXDV_GATED) 1927 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 1928 ~0x00080000); 1929 1930 /* 1931 * Set the initial TX and RX configuration. 1932 */ 1933 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1934 1935 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 1936 1937 rxcfg = RL_RXCFG_CONFIG; 1938 if (sc->rl_flags & RL_FLAG_EARLYOFF) 1939 rxcfg |= RL_RXCFG_EARLYOFF; 1940 else if (sc->rl_flags & RL_FLAG_EARLYOFFV2) 1941 rxcfg |= RL_RXCFG_EARLYOFFV2; 1942 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 1943 1944 /* 1945 * Enable transmit and receive. 1946 */ 1947 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); 1948 1949 /* Program promiscuous mode and multicast filters. */ 1950 re_iff(sc); 1951 1952 /* 1953 * Enable interrupts. 1954 */ 1955 re_setup_intr(sc, 1, sc->rl_imtype); 1956 CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs); 1957 1958 /* Start RX/TX process. */ 1959 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1960 1961 /* 1962 * For 8169 gigE NICs, set the max allowed RX packet 1963 * size so we can receive jumbo frames. 1964 */ 1965 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 1966 if (sc->rl_flags & RL_FLAG_PCIE && 1967 (sc->rl_flags & RL_FLAG_JUMBOV2) == 0) 1968 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 1969 else 1970 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 1971 } 1972 1973 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | 1974 RL_CFG1_DRVLOAD); 1975 1976 ifp->if_flags |= IFF_RUNNING; 1977 ifq_clr_oactive(&ifp->if_snd); 1978 1979 splx(s); 1980 1981 sc->rl_flags &= ~RL_FLAG_LINK; 1982 mii_mediachg(&sc->sc_mii); 1983 1984 timeout_add_sec(&sc->timer_handle, 1); 1985 1986 return (0); 1987 } 1988 1989 /* 1990 * Set media options. 1991 */ 1992 int 1993 re_ifmedia_upd(struct ifnet *ifp) 1994 { 1995 struct rl_softc *sc; 1996 1997 sc = ifp->if_softc; 1998 1999 return (mii_mediachg(&sc->sc_mii)); 2000 } 2001 2002 /* 2003 * Report current media status. 
/*
 * Set media options.
 */
int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * Report current media status.
 */
void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				re_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
		    command);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RL_FRAMELEN(sc->rl_max_mtu),
		    &sc->rl_ldata.rl_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		/*
		 * Only the hardware RX filter needs reprogramming,
		 * so avoid a full re_init().
		 */
		if (ifp->if_flags & IFF_RUNNING)
			re_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
re_watchdog(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	re_init(ifp);

	splx(s);
}
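/*
 * The teardown order in re_stop() below mirrors re_init(): clear the RX
 * filter first so no new frames are accepted, halt the DMA engines, mask
 * and ack interrupts, and use intr_barrier()/ifq_barrier() to wait out
 * any interrupt handler or transmit context still running before the
 * RX/TX mbufs are unloaded and freed.  Without the barriers, the chip or
 * a racing re_start() could touch buffers that are about to be freed.
 */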
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
re_stop(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int i;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	sc->rl_timerintr = 0;

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Disable accepting frames to put the RX MAC into idle state.
	 * Otherwise it's possible to receive frames while the stop
	 * command is executing, and the controller can DMA such a
	 * frame into an already freed RX buffer during that period.
	 */
	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
	    RL_RXCFG_RX_MULTI));

	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_1(sc, sc->rl_txstart) &
			    RL_TXSTART_START) == 0)
				break;
			DELAY(20);
		}
		if (i == 0)
			printf("%s: stopping TX poll timed out!\n",
			    sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
			for (i = RL_TIMEOUT; i > 0; i--) {
				if ((CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_QUEUE_EMPTY) != 0)
					break;
				DELAY(100);
			}
			if (i == 0)
				printf("%s: stopping TXQ timed out!\n",
				    sc->sc_dev.dv_xname);
		}
	} else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	mii_down(&sc->sc_mii);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

void
re_setup_hw_im(struct rl_softc *sc)
{
	KASSERT(sc->rl_flags & RL_FLAG_HWIM);

	/*
	 * Interrupt moderation
	 *
	 * 0xABCD
	 * A - unknown (maybe TX related)
	 * B - TX timer (unit: 25us)
	 * C - unknown (maybe RX related)
	 * D - RX timer (unit: 25us)
	 *
	 * re(4)'s interrupt moderation is actually controlled by
	 * two variables, like most other NICs (bge, bnx etc.):
	 * o timer
	 * o number of packets [P]
	 *
	 * The relationship between these two variables is the same
	 * as on other NICs:
	 * if (timer expires || packets > [P])
	 *	interrupt is delivered
	 *
	 * Currently we only know how to set the 'timer', not the
	 * 'number of packets'; [P] appears to be ~30, as far as
	 * tested (sinking ~900Kpps, the interrupt rate is 30KHz).
	 */
	CSR_WRITE_2(sc, RL_IM,
	    RL_IM_RXTIME(sc->rl_rx_time) |
	    RL_IM_TXTIME(sc->rl_tx_time) |
	    RL_IM_MAGIC);
}

void
re_disable_hw_im(struct rl_softc *sc)
{
	if (sc->rl_flags & RL_FLAG_HWIM)
		CSR_WRITE_2(sc, RL_IM, 0);
}
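/*
 * Worked examples for the two moderation schemes (the values are
 * illustrative, not chip defaults).  Hardware IM: the RL_IM timer
 * fields count in 25us units, so RL_IM_TXTIME(5) | RL_IM_RXTIME(2)
 * requests roughly 125us of TX and 50us of RX coalescing.  Simulated
 * IM (re_setup_sim_im() below): taking rl_sim_time in microseconds and
 * rl_bus_speed in MHz, time * speed is the nominal tick count, and the
 * "* 8 / 5" scales it by 1.6 to compensate for the fast-running clock;
 * e.g. 125us on a 33MHz bus gives 125 * 33 * 8 / 5 = 6600 ticks for
 * RL_TIMERINT_8169.
 */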
void
re_setup_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
	else {
		u_int32_t nticks;

		/*
		 * The datasheet says the tick count decrements at the
		 * bus clock rate, but the clock appears to run a
		 * little faster, so compensate for that here.
		 */
		nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
		CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
	}
	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
	sc->rl_timerintr = 1;
}

void
re_disable_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0);
	else
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
	sc->rl_timerintr = 0;
}

void
re_config_imtype(struct rl_softc *sc, int imtype)
{
	switch (imtype) {
	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		/* FALLTHROUGH */
	case RL_IMTYPE_NONE:
		sc->rl_intrs = RL_INTRS_CPLUS;
		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN;
		sc->rl_tx_ack = RL_ISR_TX_OK;
		break;

	case RL_IMTYPE_SIM:
		sc->rl_intrs = RL_INTRS_TIMER;
		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

void
re_set_jumbo(struct rl_softc *sc)
{
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
	    RL_CFG3_JUMBO_EN0);

	switch (sc->sc_hwrev) {
	case RL_HWREV_8168DP:
		break;
	case RL_HWREV_8168E:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_8168E_JUMBO_EN1);
		break;
	default:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_JUMBO_EN1);
		break;
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}

void
re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
	else
		CSR_WRITE_2(sc, RL_IMR, 0);

	switch (imtype) {
	case RL_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RL_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}
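/*
 * re_setup_intr() is the one place the moderation modes are switched:
 * re_init() calls it as re_setup_intr(sc, 1, sc->rl_imtype), and the
 * imtype argument picks plain per-packet interrupts (RL_IMTYPE_NONE),
 * the hardware timer (RL_IMTYPE_HW, valid only with RL_FLAG_HWIM) or
 * the simulated timer (RL_IMTYPE_SIM).
 */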
#ifndef SMALL_KERNEL
int
re_wol(struct ifnet *ifp, int enable)
{
	struct rl_softc *sc = ifp->if_softc;
	u_int8_t val;

	if (enable) {
		if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
			printf("%s: power management is disabled, "
			    "cannot do WOL\n", sc->sc_dev.dv_xname);
			return (ENOTSUP);
		}
		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
			printf("%s: no auxiliary power, cannot do WOL from D3 "
			    "(power-off) state\n", sc->sc_dev.dv_xname);
	}

	re_iff(sc);

	/* Temporarily enable write to configuration registers. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);

	/*
	 * Unicast/multicast/broadcast wakeups are always disabled;
	 * magic packet is the only wake event we ever arm.
	 */
	if (enable) {
		/* Wake on magic packet only. */
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val |= RL_CFG3_WOL_MAGIC;
		val &= ~RL_CFG3_WOL_LINK;
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	} else {
		/* Disable all wake events. */
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	return (0);
}
#endif
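/*
 * Net effect of re_wol(): with enable set, magic packet is the only
 * armed wake event (unicast/multicast/broadcast and link-change wakeups
 * are cleared); with enable clear, every wake event is disabled.  The
 * hook itself is registered at attach time (not shown here), presumably
 * for the interface's WOL capability.
 */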