/*	$OpenBSD: re.c,v 1.192 2016/04/20 12:15:24 sthen Exp $	*/
/*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support Realtek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access is the same as it is on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC.
 * It has almost the same programming API and feature set as the 8139C+
 * with the following differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S can not transmit
 * jumbo frames larger than 7440, so the max MTU possible with this
 * driver is 7422 bytes.
 */
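/*
 * A worked check of the jumbo limit quoted above: 7440 bytes of frame
 * capability minus ETHER_HDR_LEN (14) and ETHER_CRC_LEN (4) leaves a
 * 7422-byte MTU, which is presumably where the RL_JUMBO_MTU_7K value
 * used in re_attach() comes from (the macro itself lives in
 * rtl81x9reg.h).
 */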
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, struct mbuf *, struct rl_txq *, int *);

int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifnet *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_set_jumbo(struct rl_softc *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);
#ifndef SMALL_KERNEL
int	re_wol(struct ifnet*, int);
#endif

void	in_delayed_cksum(struct mbuf *);

struct cfdriver re_cd = {
	0, "re", DV_IFNET
};

extern char *hw_vendor, *hw_prod;

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

#define RL_FRAMELEN(mtu)				\
	(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +		\
	    ETHER_VLAN_ENCAP_LEN)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E,	"RTL8100E" },
	{ RL_HWREV_8100E_SPIN2, "RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8106E,	"RTL8106E" },
	{ RL_HWREV_8401E,	"RTL8401E" },
	{ RL_HWREV_8402,	"RTL8402" },
	{ RL_HWREV_8411,	"RTL8411" },
	{ RL_HWREV_8411B,	"RTL8411B" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RL_HWREV_8103E,	"RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168B_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168B_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168B_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
	{ RL_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RL_HWREV_8168H,	"RTL8168H/8111H" },
	{ RL_HWREV_8105E,	"RTL8105E" },
	{ RL_HWREV_8105E_SPIN1,	"RTL8105E" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RL_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};

static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}
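/*
 * An illustrative example of the split above: with a 64-bit bus_addr_t
 * of 0x0000000123456789, rl_bufaddr_lo ends up holding
 * htole32(0x23456789) and rl_bufaddr_hi htole32(0x00000001); when
 * bus_addr_t is 32 bits wide the high word is always zero.
 */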
/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)self;
	u_int32_t	rval;
	int		i;

	if (phy != 7)
		return (0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int32_t	rval;
	int		i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);

	DELAY(20);
}
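/*
 * Both GMII accessors above drive the indirect RL_PHYAR register: the
 * register number appears to sit in bits 20:16 (hence the << 16), the
 * data in the low 16 bits masked by RL_PHYAR_PHYDATA, and
 * RL_PHYAR_BUSY serves as the handshake bit (set by the chip when read
 * data is ready, cleared once a write has completed, as the polling
 * loops above assume).
 */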
int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}

void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		/* 8139C+ has different bit layout. */
		data &= ~(BMCR_LOOP | BMCR_ISO);
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

void
re_miibus_statchg(struct device *dev)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct mii_data	*mii = &sc->sc_mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Realtek controllers do not provide an interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}
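/*
 * re_iff() below hashes each multicast address with ether_crc32_be()
 * and uses the top six bits (>> 26) as a bit index into the 64-bit
 * filter split across RL_MAR0/RL_MAR4; e.g. a CRC yielding h = 37
 * sets bit 5 (37 - 32) of hashes[1]. An illustrative worked example,
 * not additional driver logic.
 */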
void
re_iff(struct rl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	int		h = 0;
	u_int32_t	hashes[2];
	u_int32_t	rxfilt;
	struct arpcom	*ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * For some unfathomable reason, Realtek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if (sc->rl_flags & RL_FLAG_PCIE) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

void
re_reset(struct rl_softc *sc)
{
	int	i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	if (sc->rl_flags & RL_FLAG_MACRESET)
		CSR_WRITE_1(sc, RL_LDPS, 1);
}
#ifdef __armish__
/*
 * Thecus N2100 doesn't store the full mac address in eeprom
 * so we read the old mac address from the device before the reset
 * in hopes that the proper mac address is already there.
 */
union {
	u_int32_t eaddr_word[2];
	u_char eaddr[ETHER_ADDR_LEN];
} boot_eaddr;
int boot_eaddr_valid;
#endif /* __armish__ */

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	const struct re_revision *rr;
	const char	*re_name = NULL;
	int		ntxsegs;

	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

	switch (sc->sc_hwrev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8100E_SPIN2:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E_VL:
		sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168G:
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
	case RL_HWREV_8411B:
		if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
			/* RTL8106EUS */
			sc->rl_flags |= RL_FLAG_FASTETHER;
			sc->rl_max_mtu = RL_MTU;
		} else {
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
			sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		}

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SCd:
	case RL_HWREV_8169_8110SCe:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		sc->rl_max_mtu = RL_JUMBO_MTU_7K;
		break;
	default:
		break;
	}

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}
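	/*
	 * The 8139C+ keeps its configuration registers at the legacy
	 * 8139 offsets; the rl_cfg0..rl_cfg5 indirection above lets the
	 * rest of the driver address either register layout through a
	 * single code path.
	 */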
	/* Reset the adapter. */
	re_reset(sc);

	sc->rl_tx_time = 5;		/* 125us */
	sc->rl_rx_time = 2;		/* 50us */
	if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_sim_time = 75;	/* 75us */
	else
		sc->rl_sim_time = 125;	/* 125us */
	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		sc->rl_bus_speed = 33; /* XXX */
	else if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_bus_speed = 125;
	else {
		u_int8_t cfg2;

		cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
		switch (cfg2 & RL_CFG2_PCI_MASK) {
		case RL_CFG2_PCI_33MHZ:
			sc->rl_bus_speed = 33;
			break;
		case RL_CFG2_PCI_66MHZ:
			sc->rl_bus_speed = 66;
			break;
		default:
			printf("%s: unknown bus speed, assume 33MHz\n",
			    sc->sc_dev.dv_xname);
			sc->rl_bus_speed = 33;
			break;
		}

		if (cfg2 & RL_CFG2_PCI_64BIT)
			sc->rl_flags |= RL_FLAG_PCI64;
	}

	re_config_imtype(sc, sc->rl_imtype);

	if (sc->rl_flags & RL_FLAG_PAR) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = letoh16(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);

#ifdef __armish__
		/*
		 * On the Thecus N2100, the MAC address in the EEPROM is
		 * always 00:14:fd:10:00:00. The proper MAC address is
		 * stored in flash. Fortunately RedBoot configures the
		 * proper MAC address (for the first onboard interface)
		 * which we can read from the IDR.
		 */
		if (eaddr[0] == 0x00 && eaddr[1] == 0x14 &&
		    eaddr[2] == 0xfd && eaddr[3] == 0x10 &&
		    eaddr[4] == 0x00 && eaddr[5] == 0x00) {
			if (boot_eaddr_valid == 0) {
				boot_eaddr.eaddr_word[1] =
				    letoh32(CSR_READ_4(sc, RL_IDR4));
				boot_eaddr.eaddr_word[0] =
				    letoh32(CSR_READ_4(sc, RL_IDR0));
				boot_eaddr_valid = 1;
			}

			bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr));
			eaddr[5] += sc->sc_dev.dv_unit;
		}
#endif
	}
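	/*
	 * Per the ring-size note in the header comment, the 8139C+
	 * supports at most 64 descriptors per ring while the gigE chips
	 * allow up to 1024, hence the per-family counts chosen below.
	 */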
	/*
	 * Set RX length mask, TX poll request register
	 * and descriptor count.
	 */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
		ntxsegs = RL_8139_NTXSEGS;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
		ntxsegs = RL_8169_NTXSEGS;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == sc->sc_hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	if (sc->rl_ldata.rl_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct rl_desc)) {
		sc->rl_ldata.rl_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct rl_desc);
	}
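	/*
	 * DMA resources are created in a fixed order below (TX ring,
	 * TX buffer maps, RX ring, RX buffer maps); the fail_0..fail_8
	 * labels at the end of re_attach() unwind the same steps in
	 * reverse.
	 */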
	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
	    RL_TX_LIST_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RL_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN, ntxsegs, RL_JUMBO_FRAMELEN,
		    0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate rx listnseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
	    RL_RX_DMAMEM_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_FRAMELEN(sc->rl_max_mtu), 1,
		    RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
		    &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_hardmtu = sc->rl_max_mtu;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

	/*
	 * RTL8168/8111C generates wrong IP checksummed frame if the
	 * packet has IP options so disable TX IP checksum offloading.
	 */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
		break;
	default:
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = re_wol;
	re_wol(ifp, 0);
#endif
	timeout_set(&sc->timer_handle, re_tick, sc);
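	/*
	 * re_tick() (scheduled above) fires once a second to run
	 * mii_tick() and re-evaluate link state while the interface
	 * is up.
	 */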
	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (sc->sc_hwrev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return (0);

fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}
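/*
 * In re_newbuf() below, RE_ETHER_ALIGN (presumably 2, the usual
 * ETHER_ALIGN value) offsets m_data so that the 14-byte Ethernet
 * header leaves the IP header longword-aligned on strict-alignment
 * architectures.
 */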
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RL_FRAMELEN(sc->rl_max_mtu));
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}

int
re_tx_list_init(struct rl_softc *sc)
{
	int	i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < RL_TX_QLEN; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2, sc->rl_ldata.rl_rx_desc_cnt);
	re_rx_list_fill(sc);

	return (0);
}

void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int	slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
	    sc->rl_ldata.rl_rx_desc_cnt);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}
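/*
 * RX ring accounting: re_rx_list_fill() reserves up to rl_rx_desc_cnt
 * slots with if_rxr_get(), fills one descriptor per re_newbuf() call,
 * and hands any slots it could not fill back via if_rxr_put().
 */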
/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			continue;
		} else if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, Realtek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			continue;
		}
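		/*
		 * Fragment length math below (illustrative numbers):
		 * with 2048-byte RX buffers, a 5000-byte frame arrives
		 * as 2048 + 2048 + 904 bytes and the modulo recovers
		 * the 904-byte tail; a frame ending exactly on a
		 * buffer boundary yields 0, which is why the zero case
		 * is rounded back up to a full buffer.
		 */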
		if (sc->rl_head != NULL) {
			m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
			if (m->m_len == 0)
				m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	if_input(ifp, &ml);

	return (rx);
}
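/*
 * Received frames are batched on the local mbuf_list in re_rxeof()
 * above and pushed to the stack in a single if_input() call per
 * invocation, keeping the hot loop free of per-packet stack entry.
 */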
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx, tx_free, freed = 0;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;
	    idx != sc->rl_ldata.rl_txq_prodidx; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		freed += txq->txq_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	if (freed == 0)
		return (0);

	tx_free = atomic_add_int_nv(&sc->rl_ldata.rl_tx_free, freed);
	KASSERT(tx_free <= sc->rl_ldata.rl_tx_desc_cnt);

	sc->rl_ldata.rl_txq_considx = idx;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (tx_free < sc->rl_ldata.rl_tx_desc_cnt)
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (1);
}

void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	int		s;

	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);

	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(&sc->sc_dev);

	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			re_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if (sc->rl_timerintr) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fall back
				 * to using TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}
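/*
 * Moderation flow in re_intr() above: while simulated moderation is
 * active, writing 1 to RL_TIMERCNT reloads the hardware timer; a
 * timer interrupt that finds no completed TX/RX work drops back to
 * plain per-packet interrupts via re_setup_intr().
 */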
int
re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
{
	bus_dmamap_t	map;
	struct mbuf	*mp, mh;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	int		off;
	struct ip	*ip;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take effect.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
	    m->m_pkthdr.len > RL_MTU &&
	    (m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		mp = m_getptr(m, ETHER_HDR_LEN, &off);
		mh.m_flags = 0;
		mh.m_data = mtod(mp, caddr_t) + off;
		mh.m_next = mp->m_next;
		mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN;
		mh.m_len = mp->m_len - off;
		ip = (struct ip *)mh.m_data;

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
			in_delayed_cksum(&mh);

		m->m_pkthdr.csum_flags &=
		    ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
	}

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	map = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (ENOMEM);
	}

	nsegs = map->dm_nsegs;
	pad = 0;

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
		pad = 1;
		nsegs++;
	}

	if (*used + nsegs + 1 >= sc->rl_ldata.rl_tx_free) {
		error = ENOBUFS;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif
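	/*
	 * The VLAN tag above is stored byte-swapped; re_rxeof()
	 * applies the matching ntohs() when extracting a tag on the
	 * receive side.
	 */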
	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			for (; seg > 0; seg--) {
				uidx = (curidx + sc->rl_ldata.rl_tx_desc_cnt -
				    seg) % sc->rl_ldata.rl_tx_desc_cnt;
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = EBUSY;
			goto fail_unload;
		}

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_nextfree = curidx;

	*used += nsegs;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}
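/*
 * Contract for re_encap() above, as re_start() relies on it: on
 * success the mbuf is owned by the TX ring and *used is incremented
 * by the number of descriptors consumed; ENOBUFS means the ring was
 * too full and the caller may retry the same mbuf later; any other
 * error means the mbuf should be dropped.
 */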
/*
 * Main transmit routine for C+ and gigE NICs.
 */
void
re_start(struct ifnet *ifp)
{
	struct rl_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	int		idx, used = 0, txq_free, error;

	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	txq_free = sc->rl_ldata.rl_txq_considx;
	idx = sc->rl_ldata.rl_txq_prodidx;
	if (txq_free <= idx)
		txq_free += RL_TX_QLEN;
	txq_free -= idx;

	for (;;) {
		if (txq_free <= 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		error = re_encap(sc, m, &sc->rl_ldata.rl_txq[idx], &used);
		if (error == 0)
			ifq_deq_commit(&ifp->if_snd, m);
		else if (error == ENOBUFS) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		} else {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		idx = RL_NEXT_TXQ(sc, idx);
		txq_free--;
	}

	if (used == 0)
		return;

	ifp->if_timer = 5;
	atomic_sub_int(&sc->rl_ldata.rl_tx_free, used);
	KASSERT(sc->rl_ldata.rl_tx_free >= 0);

	sc->rl_ldata.rl_txq_prodidx = idx;

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
}

int
re_init(struct ifnet *ifp)
{
	struct rl_softc	*sc = ifp->if_softc;
	u_int16_t	cfg;
	uint32_t	rxcfg;
	int		s;
	union {
		u_int32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(ifp);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
	    RL_CPLUSCMD_RXCSUM_ENB;

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		cfg |= RL_CPLUSCMD_VLANSTRIP;

	if (sc->rl_flags & RL_FLAG_MACSTAT)
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
	else
		cfg |= RL_CPLUSCMD_RXENB;

	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	/*
	 * Default on PC Engines APU1 is to have all LEDs off unless
	 * there is network activity. Override to provide a link status
	 * LED.
	 */
	if (sc->sc_hwrev == RL_HWREV_8168E &&
	    hw_vendor != NULL && hw_prod != NULL &&
	    strcmp(hw_vendor, "PC Engines") == 0 &&
	    strcmp(hw_prod, "APU") == 0) {
		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
	}
	/*
	 * Protect config register again
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		re_set_jumbo(sc);
int
re_init(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	u_int16_t cfg;
	u_int32_t rxcfg;
	int s;
	union {
		u_int32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(ifp);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
	    RL_CPLUSCMD_RXCSUM_ENB;

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		cfg |= RL_CPLUSCMD_VLANSTRIP;

	if (sc->rl_flags & RL_FLAG_MACSTAT)
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
	else
		cfg |= RL_CPLUSCMD_RXENB;

	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));

	/*
	 * Default on PC Engines APU1 is to have all LEDs off unless
	 * there is network activity. Override to provide a link status
	 * LED.
	 */
	if (sc->sc_hwrev == RL_HWREV_8168E &&
	    hw_vendor != NULL && hw_prod != NULL &&
	    strcmp(hw_vendor, "PC Engines") == 0 &&
	    strcmp(hw_prod, "APU") == 0) {
		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
	}

	/*
	 * Protect config registers again.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		re_set_jumbo(sc);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));

	if (sc->rl_flags & RL_FLAG_RXDV_GATED)
		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
		    ~0x00080000);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	rxcfg = RL_RXCFG_CONFIG;
	if (sc->rl_flags & RL_FLAG_EARLYOFF)
		rxcfg |= RL_RXCFG_EARLYOFF;
	else if (sc->rl_flags & RL_FLAG_EARLYOFFV2)
		rxcfg |= RL_RXCFG_EARLYOFFV2;
	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);

	/* Program promiscuous mode and multicast filters. */
	re_iff(sc);

	/*
	 * Enable interrupts and ack any pending interrupt status.
	 */
	re_setup_intr(sc, 1, sc->rl_imtype);
	CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs);

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		if (sc->rl_flags & RL_FLAG_PCIE &&
		    (sc->rl_flags & RL_FLAG_JUMBOV2) == 0)
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
		else
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
	}

	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
	    RL_CFG1_DRVLOAD);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);

	timeout_add_sec(&sc->timer_handle, 1);

	return (0);
}

/*
 * Set media options.
 */
int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}
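
/*
 * Illustrative sketch (compiled out): re_init() programs the 6-byte
 * station address with two 32-bit writes to IDR0/IDR4.  The union
 * there guarantees 4-byte alignment for the u_int32_t loads, and the
 * htole32() conversion preserves the in-memory byte order of the
 * address when the value reaches the little-endian register.  A
 * stand-alone, endian-independent model of that packing (hypothetical
 * names):
 */
#if 0
static void
example_pack_eaddr(const u_int8_t ea[6], u_int32_t *idr0, u_int32_t *idr4)
{
	/*
	 * Build little-endian register images byte by byte: ea[0] lands
	 * in the least significant byte of IDR0, ea[4] in the least
	 * significant byte of IDR4 (its upper 16 bits are unused).
	 */
	*idr0 = (u_int32_t)ea[0] | (u_int32_t)ea[1] << 8 |
	    (u_int32_t)ea[2] << 16 | (u_int32_t)ea[3] << 24;
	*idr4 = (u_int32_t)ea[4] | (u_int32_t)ea[5] << 8;
}
#endif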
/*
 * Report current media status.
 */
void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				re_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RL_FRAMELEN(sc->rl_max_mtu), &sc->rl_ldata.rl_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/*
	 * ENETRESET means only the RX filter needs reprogramming;
	 * do that without a full reinitialization.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			re_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
re_watchdog(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	/* Reap any completed work, then reinitialize the chip. */
	re_txeof(sc);
	re_rxeof(sc);

	re_init(ifp);

	splx(s);
}
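
/*
 * Illustrative sketch (compiled out): re_start() arms the stack's
 * transmit watchdog by setting ifp->if_timer = 5.  The kernel
 * decrements if_timer roughly once per second and calls the
 * interface's watchdog routine (re_watchdog() above) when it reaches
 * zero, i.e. when transmit completions have stopped clearing or
 * rearming it for about five seconds.  A stand-alone model of that
 * countdown contract (hypothetical names):
 */
#if 0
static void
example_slowtimo(int *if_timer, void (*watchdog)(void))
{
	/* Called once per second by the stack. */
	if (*if_timer > 0 && --(*if_timer) == 0)
		watchdog();
}
#endif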
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
re_stop(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int i;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	sc->rl_timerintr = 0;

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Disable accepting frames to put RX MAC into idle state.
	 * Otherwise it's possible to get frames while stop command
	 * execution is in progress and controller can DMA the frame
	 * to already freed RX buffer during that period.
	 */
	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
	    RL_RXCFG_RX_MULTI));

	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_1(sc, sc->rl_txstart) &
			    RL_TXSTART_START) == 0)
				break;
			DELAY(20);
		}
		if (i == 0)
			printf("%s: stopping TX poll timed out!\n",
			    sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
			for (i = RL_TIMEOUT; i > 0; i--) {
				if ((CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_QUEUE_EMPTY) != 0)
					break;
				DELAY(100);
			}
			if (i == 0)
				printf("%s: stopping TXQ timed out!\n",
				    sc->sc_dev.dv_xname);
		}
	} else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	mii_down(&sc->sc_mii);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

void
re_setup_hw_im(struct rl_softc *sc)
{
	KASSERT(sc->rl_flags & RL_FLAG_HWIM);

	/*
	 * Interrupt moderation
	 *
	 * 0xABCD
	 * A - unknown (maybe TX related)
	 * B - TX timer (unit: 25us)
	 * C - unknown (maybe RX related)
	 * D - RX timer (unit: 25us)
	 *
	 * re(4)'s interrupt moderation is actually controlled by
	 * two variables, like most other NICs (bge, bnx etc.):
	 * o timer
	 * o number of packets [P]
	 *
	 * The logic relationship between these two variables is
	 * similar to other NICs too:
	 * if (timer expires || packets > [P])
	 *	Interrupt is delivered
	 *
	 * Currently we only know how to set 'timer', but not
	 * 'number of packets', which should be ~30, as far as I
	 * tested (sink ~900Kpps, interrupt rate is 30KHz).
	 */
	CSR_WRITE_2(sc, RL_IM,
	    RL_IM_RXTIME(sc->rl_rx_time) |
	    RL_IM_TXTIME(sc->rl_tx_time) |
	    RL_IM_MAGIC);
}

void
re_disable_hw_im(struct rl_softc *sc)
{
	if (sc->rl_flags & RL_FLAG_HWIM)
		CSR_WRITE_2(sc, RL_IM, 0);
}
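
/*
 * Illustrative sketch (compiled out): assuming the nibble layout
 * described in re_setup_hw_im() above ("0xABCD", B = TX timer and
 * D = RX timer in 25us units), a register image could be packed as
 * below.  The real field macros are RL_IM_RXTIME()/RL_IM_TXTIME()/
 * RL_IM_MAGIC from rtl81x9reg.h; this stand-alone version exists only
 * to make the layout concrete.
 */
#if 0
static u_int16_t
example_pack_im(unsigned int tx_25us, unsigned int rx_25us)
{
	/* e.g. tx_25us = 2 (50us), rx_25us = 3 (75us) -> 0x0203 */
	return (((tx_25us & 0xf) << 8) | (rx_25us & 0xf));
}
#endif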
void
re_setup_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
	else {
		u_int32_t nticks;

		/*
		 * Datasheet says the tick counter decrements at the
		 * bus speed, but the clock seems to run a little bit
		 * faster, so compensate by scaling the tick count
		 * by 8/5 here.
		 */
		nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
		CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
	}
	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
	sc->rl_timerintr = 1;
}

void
re_disable_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0);
	else
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
	sc->rl_timerintr = 0;
}

void
re_config_imtype(struct rl_softc *sc, int imtype)
{
	switch (imtype) {
	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		/* FALLTHROUGH */
	case RL_IMTYPE_NONE:
		sc->rl_intrs = RL_INTRS_CPLUS;
		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN;
		sc->rl_tx_ack = RL_ISR_TX_OK;
		break;

	case RL_IMTYPE_SIM:
		sc->rl_intrs = RL_INTRS_TIMER;
		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

void
re_set_jumbo(struct rl_softc *sc)
{
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
	    RL_CFG3_JUMBO_EN0);

	switch (sc->sc_hwrev) {
	case RL_HWREV_8168DP:
		break;
	case RL_HWREV_8168E:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_8168E_JUMBO_EN1);
		break;
	default:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_JUMBO_EN1);
		break;
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}

void
re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
	else
		CSR_WRITE_2(sc, RL_IMR, 0);

	switch (imtype) {
	case RL_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RL_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}
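
/*
 * Illustrative sketch (compiled out): re_setup_sim_im() converts a
 * moderation interval into timer ticks.  Assuming rl_sim_time is in
 * microseconds and rl_bus_speed in MHz (i.e. ticks per microsecond),
 * the nominal count is time * speed, and the 8/5 factor compensates
 * for the clock running ~1.6x faster than documented.  For example,
 * 150us at 125MHz: 150 * 125 = 18750 nominal ticks, and
 * 18750 * 8 / 5 = 30000 ticks actually programmed.
 */
#if 0
static u_int32_t
example_sim_ticks(u_int32_t time_us, u_int32_t bus_mhz)
{
	return (time_us * bus_mhz * 8 / 5);
}
#endif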
#ifndef SMALL_KERNEL
int
re_wol(struct ifnet *ifp, int enable)
{
	struct rl_softc *sc = ifp->if_softc;
	u_int8_t val;

	if (enable) {
		if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
			printf("%s: power management is disabled, "
			    "cannot do WOL\n", sc->sc_dev.dv_xname);
			return (ENOTSUP);
		}
		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
			printf("%s: no auxiliary power, cannot do WOL from D3 "
			    "(power-off) state\n", sc->sc_dev.dv_xname);
	}

	re_iff(sc);

	/* Temporarily enable write to configuration registers. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);

	/*
	 * Always disable the unicast, multicast and broadcast wake
	 * events; when enabling WOL, wake on magic packet only.
	 */
	if (enable) {
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val |= RL_CFG3_WOL_MAGIC;
		val &= ~RL_CFG3_WOL_LINK;
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	} else {
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	return (0);
}
#endif