/*	$OpenBSD: re.c,v 1.219 2024/11/05 18:58:59 miod Exp $	*/
/*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support Realtek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, unsigned int, struct mbuf *);

int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifqueue *);
void	re_txstart(void *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_set_jumbo(struct rl_softc *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);
int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);
#ifndef SMALL_KERNEL
int	re_wol(struct ifnet *, int);
#endif
#if NKSTAT > 0
void	re_kstat_attach(struct rl_softc *);
void	re_kstat_detach(struct rl_softc *);
#endif

void	in_delayed_cksum(struct mbuf *);

struct cfdriver re_cd = {
	NULL, "re", DV_IFNET
};

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) & ~x)

#define RL_FRAMELEN(mtu)				\
	(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +		\
	    ETHER_VLAN_ENCAP_LEN)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E,	"RTL8100E" },
	{ RL_HWREV_8100E_SPIN2, "RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8106E,	"RTL8106E" },
	{ RL_HWREV_8401E,	"RTL8401E" },
	{ RL_HWREV_8402,	"RTL8402" },
	{ RL_HWREV_8411,	"RTL8411" },
	{ RL_HWREV_8411B,	"RTL8411B" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RL_HWREV_8103E,	"RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168B_SPIN1, "RTL8168 1" },
	{ RL_HWREV_8168B_SPIN2, "RTL8168 2" },
	{ RL_HWREV_8168B_SPIN3, "RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
	{ RL_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RL_HWREV_8168H,	"RTL8168H/8111H" },
	{ RL_HWREV_8105E,	"RTL8105E" },
	{ RL_HWREV_8105E_SPIN1, "RTL8105E" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RL_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RL_HWREV_8168FP,	"RTL8168FP/8111FP" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB, "RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};

static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */
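	/*
	 * d holds the read opcode shifted above rl_eewidth address
	 * bits; the loop below clocks that word out most-significant
	 * bit first, driving DATAIN high or low for each bit and
	 * strobing the clock once per bit.
	 */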

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr'.
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}
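
/*
 * PHY access on the gigE chips goes through the PHYAR register: the
 * routines below post the register number in the upper bits, then poll
 * the RL_PHYAR_BUSY flag (up to RL_PHY_TIMEOUT iterations) until the
 * chip reports that the transaction has completed.
 */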

int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)self;
	u_int32_t	rval;
	int		i;

	if (phy != 7)
		return (0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int32_t	rval;
	int		i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);

	DELAY(20);
}

int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}

void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		/* 8139C+ has different bit layout. */
		data &= ~(BMCR_LOOP | BMCR_ISO);
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

void
re_miibus_statchg(struct device *dev)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct mii_data	*mii = &sc->sc_mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Realtek controllers do not provide an interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

void
re_iff(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	u_int32_t		rxfilt;
	struct arpcom		*ac = &sc->sc_arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
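		/*
		 * Each multicast address is hashed with a big-endian
		 * CRC32; the top 6 bits of the CRC select one of the
		 * 64 filter bits spread across the two 32-bit MAR
		 * registers.
		 */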
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * For some unfathomable reason, Realtek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if (sc->rl_flags & RL_FLAG_PCIE) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

void
re_reset(struct rl_softc *sc)
{
	int	i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	if (sc->rl_flags & RL_FLAG_MACRESET)
		CSR_WRITE_1(sc, RL_LDPS, 1);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	const struct re_revision *rr;
	const char	*re_name = NULL;

	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

	switch (sc->sc_hwrev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8100E_SPIN2:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E_VL:
		sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168FP:
	case RL_HWREV_8168G:
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
	case RL_HWREV_8411B:
		if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
			/* RTL8106EUS */
			sc->rl_flags |= RL_FLAG_FASTETHER;
			sc->rl_max_mtu = RL_MTU;
		} else {
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
			sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		}

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SCd:
	case RL_HWREV_8169_8110SCe:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		sc->rl_max_mtu = RL_JUMBO_MTU_7K;
		break;
	default:
		break;
	}

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}
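
	/*
	 * The 8139C+ keeps its configuration registers at the legacy
	 * 8139 offsets; caching the per-family offsets in the softc
	 * lets the rest of the driver go through sc->rl_cfgN without
	 * caring which family it is driving.
	 */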

	/* Reset the adapter. */
	re_reset(sc);

	sc->rl_tx_time = 5;		/* 125us */
	sc->rl_rx_time = 2;		/* 50us */
	if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_sim_time = 75;	/* 75us */
	else
		sc->rl_sim_time = 125;	/* 125us */
	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		sc->rl_bus_speed = 33; /* XXX */
	else if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_bus_speed = 125;
	else {
		u_int8_t cfg2;

		cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
		switch (cfg2 & RL_CFG2_PCI_MASK) {
		case RL_CFG2_PCI_33MHZ:
			sc->rl_bus_speed = 33;
			break;
		case RL_CFG2_PCI_66MHZ:
			sc->rl_bus_speed = 66;
			break;
		default:
			printf("%s: unknown bus speed, assume 33MHz\n",
			    sc->sc_dev.dv_xname);
			sc->rl_bus_speed = 33;
			break;
		}

		if (cfg2 & RL_CFG2_PCI_64BIT)
			sc->rl_flags |= RL_FLAG_PCI64;
	}

	re_config_imtype(sc, sc->rl_imtype);

	if (sc->rl_flags & RL_FLAG_PAR) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = letoh16(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	/*
	 * Set RX length mask, TX poll request register
	 * and descriptor count.
	 */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == sc->sc_hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}
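
	/*
	 * Each ring goes through the usual four-step bus_dma dance:
	 * bus_dmamem_alloc backs it with physical pages, bus_dmamem_map
	 * gives the kernel a virtual mapping, bus_dmamap_create builds
	 * a DMA map, and bus_dmamap_load hands the ring's physical
	 * address to that map for the chip to use.
	 */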

	/* Map the memory for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
	    RL_TX_LIST_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs,
		    RL_JUMBO_FRAMELEN, 0, 0,
		    &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate rx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Map the memory for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
	    RL_RX_DMAMEM_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_FRAMELEN(sc->rl_max_mtu), 1,
		    RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
		    &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = re_ioctl;
	ifp->if_qstart = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_hardmtu = sc->rl_max_mtu;
	ifq_init_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

	/*
	 * The RTL8168/8111C generates frames with a bad IP checksum
	 * if the packet has IP options, so disable TX IP checksum
	 * offloading on those revisions.
	 */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
		break;
	default:
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = re_wol;
	re_wol(ifp, 0);
#endif
	timeout_set(&sc->timer_handle, re_tick, sc);
	task_set(&sc->rl_start, re_txstart, sc);

	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (sc->sc_hwrev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	re_kstat_attach(sc);
#endif

	return (0);

fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

void
re_detach(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

#if NKSTAT > 0
	re_kstat_detach(sc);
#endif

	/* Remove timeout handler */
	timeout_del(&sc->timer_handle);

	/* Detach PHY */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete media stuff */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
	ether_ifdetach(ifp);
	if_detach(ifp);
}

int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETL(NULL, M_DONTWAIT, RL_FRAMELEN(sc->rl_max_mtu));
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}

int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}
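
/*
 * Note that if_rxr_init() below caps the ring at rl_rx_desc_cnt - 1
 * entries, presumably so that a completely full ring can still be
 * told apart from an empty one by comparing the producer and
 * consumer indexes.
 */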
int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2,
	    sc->rl_ldata.rl_rx_desc_cnt - 1);
	re_rx_list_fill(sc);

	return (0);
}

void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
	    sc->rl_ldata.rl_rx_desc_cnt);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		} else if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, Realtek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will
		 * be set, but if the CRC error bit is clear, the frame
		 * may still be valid.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			m_freem(m);
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
			if (m->m_len == 0)
				m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring);

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}
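
/*
 * Reclaim transmitted descriptors. 'free' encodes what was found:
 * 0 means nothing was reclaimed, 1 means the queue was fully drained,
 * and 2 means the chip still owns a descriptor, i.e. a transmission
 * is still in flight.
 */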
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct rl_txq	*txq;
	uint32_t	txstat;
	unsigned int	prod, cons;
	unsigned int	idx;
	int		free = 0;

	prod = sc->rl_ldata.rl_txq_prodidx;
	cons = sc->rl_ldata.rl_txq_considx;

	while (prod != cons) {
		txq = &sc->rl_ldata.rl_txq[cons];

		idx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
		txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;

		cons = RL_NEXT_TX_DESC(sc, idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rl_ldata.rl_txq_considx = cons;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->rl_start);
	else
		ifp->if_timer = 0;

	return (1);
}

void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	int		s;

	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);

	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(&sc->sc_dev);

	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status &
		    (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			re_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if (sc->rl_timerintr) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fall back
				 * to using TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}
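
/*
 * Queue a packet at ring slot 'idx'. Returns the number of descriptors
 * consumed (including any manual pad descriptor), or 0 if the mbuf
 * could not be mapped and should be dropped by the caller.
 */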
int
re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
{
	struct rl_txq	*txq;
	bus_dmamap_t	map;
	int		error, seg, nsegs, curidx, lastidx, pad;
	int		off;
	struct ip	*ip;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is a hardware requirement, according to
	 * testing done with an 8169 chip.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take effect.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
	    m->m_pkthdr.len > RL_MTU &&
	    (m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		struct mbuf mh, *mp;

		mp = m_getptr(m, ETHER_HDR_LEN, &off);
		mh.m_flags = 0;
		mh.m_data = mtod(mp, caddr_t) + off;
		mh.m_next = mp->m_next;
		mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN;
		mh.m_len = mp->m_len - off;
		ip = (struct ip *)mh.m_data;

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
			in_delayed_cksum(&mh);

		m->m_pkthdr.csum_flags &=
		    ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
	}

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[idx];
	map = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = map->dm_nsegs;
	pad = 0;

	/*
	 * With some of the Realtek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
		pad = 1;
		nsegs++;
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	curidx = idx;
	cmdstat = RL_TDESC_CMD_SOF;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
		cmdstat = RL_TDESC_CMD_OWN;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}

	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
	}

	/* d is already pointing at the last descriptor */
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rl_ldata.rl_tx_list[idx];

	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;

	return (nsegs);
}
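
/*
 * Kick the transmitter. This runs as the sc->rl_start task and is
 * invoked via ifq_serialize(), so the TX poll register is only poked
 * from one context at a time.
 */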
void
re_txstart(void *xsc)
{
	struct rl_softc	*sc = xsc;

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
void
re_start(struct ifqueue *ifq)
{
	struct ifnet	*ifp = ifq->ifq_if;
	struct rl_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	unsigned int	idx;
	unsigned int	free, used;
	int		post = 0;

	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
		ifq_purge(ifq);
		return;
	}

	free = sc->rl_ldata.rl_txq_considx;
	idx = sc->rl_ldata.rl_txq_prodidx;
	if (free <= idx)
		free += sc->rl_ldata.rl_tx_desc_cnt;
	free -= idx;

	for (;;) {
		if (free < sc->rl_ldata.rl_tx_ndescs + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = re_encap(sc, idx, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		KASSERT(used <= free);
		free -= used;

		idx += used;
		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
			idx -= sc->rl_ldata.rl_tx_desc_cnt;

		post = 1;
	}

	if (post == 0)
		return;

	ifp->if_timer = 5;
	sc->rl_ldata.rl_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->rl_start);
}

int
re_init(struct ifnet *ifp)
{
	struct rl_softc	*sc = ifp->if_softc;
	u_int16_t	cfg;
	uint32_t	rxcfg;
	int		s;
	union {
		u_int32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(ifp);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
	    RL_CPLUSCMD_RXCSUM_ENB;

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		cfg |= RL_CPLUSCMD_VLANSTRIP;

	if (sc->rl_flags & RL_FLAG_MACSTAT)
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
	else
		cfg |= RL_CPLUSCMD_RXENB;

	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	/*
	 * Default on PC Engines APU1 is to have all LEDs off unless
	 * there is network activity. Override to provide a link status
	 * LED.
	 */
	if (sc->sc_hwrev == RL_HWREV_8168E &&
	    hw_vendor != NULL && hw_prod != NULL &&
	    strcmp(hw_vendor, "PC Engines") == 0 &&
	    strcmp(hw_prod, "APU") == 0) {
		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
	}
	/*
	 * Re-protect the config registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		re_set_jumbo(sc);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);
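
	/*
	 * The chip wants the physical, not virtual, addresses of the
	 * rings; RL_ADDR_HI/RL_ADDR_LO split the 64-bit bus address
	 * recorded in each list map across two 32-bit registers.
	 */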

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));

	if (sc->rl_flags & RL_FLAG_RXDV_GATED)
		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
		    ~0x00080000);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	rxcfg = RL_RXCFG_CONFIG;
	if (sc->rl_flags & RL_FLAG_EARLYOFF)
		rxcfg |= RL_RXCFG_EARLYOFF;
	else if (sc->rl_flags & RL_FLAG_EARLYOFFV2)
		rxcfg |= RL_RXCFG_EARLYOFFV2;
	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);

	/* Program promiscuous mode and multicast filters. */
	re_iff(sc);

	/*
	 * Enable interrupts.
	 */
	re_setup_intr(sc, 1, sc->rl_imtype);
	CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs);

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		if (sc->rl_flags & RL_FLAG_PCIE &&
		    (sc->rl_flags & RL_FLAG_JUMBOV2) == 0)
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
		else
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
	}

	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
	    RL_CFG1_DRVLOAD);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);

	timeout_add_sec(&sc->timer_handle, 1);

	return (0);
}

/*
 * Set media options.
 */
int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc	*sc;

	sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}
/*
 * Set media options.
 */
int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * Report current media status.
 */
void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				re_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
		    command);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RL_FRAMELEN(sc->rl_max_mtu),
		    &sc->rl_ldata.rl_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			re_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
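/*
 * Note that both the SIOCSIFFLAGS case above and ether_ioctl() use
 * ENETRESET to signal that only the RX filter state changed; that is
 * handled with a call to re_iff() instead of a full re_init().
 */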
void
re_watchdog(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	re_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
re_stop(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int i;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	sc->rl_timerintr = 0;

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Disable accepting frames to put the RX MAC into an idle
	 * state. Otherwise it's possible to receive frames while the
	 * stop command is still executing, and the controller could
	 * DMA such a frame into an RX buffer that has already been
	 * freed.
	 */
	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
	    RL_RXCFG_RX_MULTI));

	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_1(sc, sc->rl_txstart) &
			    RL_TXSTART_START) == 0)
				break;
			DELAY(20);
		}
		if (i == 0)
			printf("%s: stopping TX poll timed out!\n",
			    sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
			for (i = RL_TIMEOUT; i > 0; i--) {
				if ((CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_QUEUE_EMPTY) != 0)
					break;
				DELAY(100);
			}
			if (i == 0)
				printf("%s: stopping TXQ timed out!\n",
				    sc->sc_dev.dv_xname);
		}
	} else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	mii_down(&sc->sc_mii);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

void
re_setup_hw_im(struct rl_softc *sc)
{
	KASSERT(sc->rl_flags & RL_FLAG_HWIM);

	/*
	 * Interrupt moderation
	 *
	 * 0xABCD
	 * A - unknown (maybe TX related)
	 * B - TX timer (unit: 25us)
	 * C - unknown (maybe RX related)
	 * D - RX timer (unit: 25us)
	 *
	 * re(4)'s interrupt moderation is actually controlled by
	 * two variables, like most other NICs (bge, bnx etc.):
	 * o timer
	 * o number of packets [P]
	 *
	 * The logic relationship between these two variables is
	 * similar to other NICs too:
	 * if (timer expires || packets > [P])
	 *	an interrupt is delivered
	 *
	 * Currently we only know how to set 'timer', but not
	 * 'number of packets', which should be ~30, as far as I
	 * have tested (sinking ~900Kpps, the interrupt rate is 30KHz).
	 */
	CSR_WRITE_2(sc, RL_IM,
	    RL_IM_RXTIME(sc->rl_rx_time) |
	    RL_IM_TXTIME(sc->rl_tx_time) |
	    RL_IM_MAGIC);
}
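/*
 * A quick check of the figures quoted above: sinking ~900Kpps with a
 * packet threshold of ~30 packets per interrupt works out to
 * 900000 / 30 = 30000 interrupts/s, matching the observed 30KHz rate.
 */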
void
re_disable_hw_im(struct rl_softc *sc)
{
	if (sc->rl_flags & RL_FLAG_HWIM)
		CSR_WRITE_2(sc, RL_IM, 0);
}

void
re_setup_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
	else {
		u_int32_t nticks;

		/*
		 * The datasheet says the tick counter decrements at
		 * bus speed, but the clock seems to run a little bit
		 * faster, so we compensate for that here.
		 */
		nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
		CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
	}
	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
	sc->rl_timerintr = 1;
}
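/*
 * Worked example of the compensation above, with illustrative (not
 * measured) numbers: for rl_sim_time = 125 (us) on an
 * rl_bus_speed = 33 (MHz) bus, the naive count would be
 * 125 * 33 = 4125 ticks; scaling by 8/5 yields
 * (125 * 33 * 8) / 5 = 6600 ticks written to RL_TIMERINT_8169.
 */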
void
re_disable_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0);
	else
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
	sc->rl_timerintr = 0;
}

void
re_config_imtype(struct rl_softc *sc, int imtype)
{
	switch (imtype) {
	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		/* FALLTHROUGH */
	case RL_IMTYPE_NONE:
		sc->rl_intrs = RL_INTRS_CPLUS;
		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN;
		sc->rl_tx_ack = RL_ISR_TX_OK;
		break;

	case RL_IMTYPE_SIM:
		sc->rl_intrs = RL_INTRS_TIMER;
		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

void
re_set_jumbo(struct rl_softc *sc)
{
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
	    RL_CFG3_JUMBO_EN0);

	switch (sc->sc_hwrev) {
	case RL_HWREV_8168DP:
		break;
	case RL_HWREV_8168E:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_8168E_JUMBO_EN1);
		break;
	default:
		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
		    RL_CFG4_JUMBO_EN1);
		break;
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}

void
re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
	else
		CSR_WRITE_2(sc, RL_IMR, 0);

	switch (imtype) {
	case RL_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RL_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

#ifndef SMALL_KERNEL
int
re_wol(struct ifnet *ifp, int enable)
{
	struct rl_softc *sc = ifp->if_softc;
	u_int8_t val;

	if (enable) {
		if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
			printf("%s: power management is disabled, "
			    "cannot do WOL\n", sc->sc_dev.dv_xname);
			return (ENOTSUP);
		}
		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
			printf("%s: no auxiliary power, cannot do WOL from D3 "
			    "(power-off) state\n", sc->sc_dev.dv_xname);
	}

	re_iff(sc);

	/* Temporarily enable write to configuration registers. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);

	/* Always disable all wake events except magic packet. */
	if (enable) {
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val |= RL_CFG3_WOL_MAGIC;
		val &= ~RL_CFG3_WOL_LINK;
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	} else {
		val = CSR_READ_1(sc, sc->rl_cfg5);
		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
		    RL_CFG5_WOL_BCAST);
		CSR_WRITE_1(sc, sc->rl_cfg5, val);

		val = CSR_READ_1(sc, sc->rl_cfg3);
		val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
		CSR_WRITE_1(sc, sc->rl_cfg3, val);
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	return (0);
}
#endif /* SMALL_KERNEL */
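/*
 * In re_wol() above, the unicast, multicast and broadcast wake bits
 * in CFG5 are cleared regardless of 'enable'; only the magic packet
 * bit in CFG3 differs between the two branches, and the link-change
 * wake bit is always cleared.
 */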
#if NKSTAT > 0

#define RE_DTCCR_CMD	(1U << 3)
#define RE_DTCCR_LO	0x10
#define RE_DTCCR_HI	0x14

struct re_kstats {
	struct kstat_kv		tx_ok;
	struct kstat_kv		rx_ok;
	struct kstat_kv		tx_er;
	struct kstat_kv		rx_er;
	struct kstat_kv		miss_pkt;
	struct kstat_kv		fae;
	struct kstat_kv		tx_1col;
	struct kstat_kv		tx_mcol;
	struct kstat_kv		rx_ok_phy;
	struct kstat_kv		rx_ok_brd;
	struct kstat_kv		rx_ok_mul;
	struct kstat_kv		tx_abt;
	struct kstat_kv		tx_undrn;
};

static const struct re_kstats re_kstats_tpl = {
	.tx_ok = KSTAT_KV_UNIT_INITIALIZER("TxOk",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok = KSTAT_KV_UNIT_INITIALIZER("RxOk",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.tx_er = KSTAT_KV_UNIT_INITIALIZER("TxEr",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_er = KSTAT_KV_UNIT_INITIALIZER("RxEr",
	    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.miss_pkt = KSTAT_KV_UNIT_INITIALIZER("MissPkt",
	    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.fae = KSTAT_KV_UNIT_INITIALIZER("FAE",
	    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.tx_1col = KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
	    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.tx_mcol = KSTAT_KV_UNIT_INITIALIZER("TxMCol",
	    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.rx_ok_phy = KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok_brd = KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok_mul = KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
	    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.tx_abt = KSTAT_KV_UNIT_INITIALIZER("TxAbt",
	    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.tx_undrn = KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
	    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
};

struct re_kstat_softc {
	struct re_stats		*re_ks_sc_stats;

	bus_dmamap_t		 re_ks_sc_map;
	bus_dma_segment_t	 re_ks_sc_seg;
	int			 re_ks_sc_nsegs;

	struct rwlock		 re_ks_sc_rwl;
};

/*
 * The dump-tally handshake: the DMA address of the counter buffer is
 * written to the DTCCR registers with the command bit set (high word
 * first), and the chip clears the command bit in the low word once it
 * has finished DMAing the counters, which the loop below polls for up
 * to 10ms (1000 iterations of 10us).
 */
static int
re_kstat_read(struct kstat *ks)
{
	struct rl_softc *sc = ks->ks_softc;
	struct re_kstat_softc *re_ks_sc = ks->ks_ptr;
	bus_dmamap_t map;
	uint64_t cmd;
	uint32_t reg;
	uint8_t command;
	int tmo;

	command = CSR_READ_1(sc, RL_COMMAND);
	if (!ISSET(command, RL_CMD_RX_ENB) || command == 0xff)
		return (ENETDOWN);

	map = re_ks_sc->re_ks_sc_map;
	cmd = map->dm_segs[0].ds_addr | RE_DTCCR_CMD;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	CSR_WRITE_4(sc, RE_DTCCR_HI, cmd >> 32);
	bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_HI, 8,
	    BUS_SPACE_BARRIER_WRITE);
	CSR_WRITE_4(sc, RE_DTCCR_LO, cmd);
	bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_LO, 4,
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	tmo = 1000;
	do {
		reg = CSR_READ_4(sc, RE_DTCCR_LO);
		if (!ISSET(reg, RE_DTCCR_CMD))
			break;

		delay(10);
		bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_LO, 4,
		    BUS_SPACE_BARRIER_READ);
	} while (--tmo);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	if (ISSET(reg, RE_DTCCR_CMD))
		return (EIO);

	nanouptime(&ks->ks_updated);

	return (0);
}

static int
re_kstat_copy(struct kstat *ks, void *dst)
{
	struct re_kstat_softc *re_ks_sc = ks->ks_ptr;
	struct re_stats *rs = re_ks_sc->re_ks_sc_stats;
	struct re_kstats *kvs = dst;

	*kvs = re_kstats_tpl;
	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->re_tx_ok);
	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->re_rx_ok);
	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->re_tx_er);
	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->re_rx_er);
	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->re_miss_pkt);
	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->re_fae);
	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->re_tx_1col);
	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->re_tx_mcol);
	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->re_rx_ok_phy);
	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->re_rx_ok_brd);
	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->re_rx_ok_mul);
	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->re_tx_abt);
	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->re_tx_undrn);

	return (0);
}

void
re_kstat_attach(struct rl_softc *sc)
{
	struct re_kstat_softc *re_ks_sc;
	struct kstat *ks;

	re_ks_sc = malloc(sizeof(*re_ks_sc), M_DEVBUF, M_NOWAIT);
	if (re_ks_sc == NULL) {
		printf("%s: cannot allocate kstat softc\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct re_stats), 1, sizeof(struct re_stats), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &re_ks_sc->re_ks_sc_map) != 0) {
		printf("%s: cannot create counter dma memory map\n",
		    sc->sc_dev.dv_xname);
		goto free;
	}

	if (bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct re_stats), RE_STATS_ALIGNMENT, 0,
	    &re_ks_sc->re_ks_sc_seg, 1, &re_ks_sc->re_ks_sc_nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf("%s: cannot allocate counter dma memory\n",
		    sc->sc_dev.dv_xname);
		goto destroy;
	}

	if (bus_dmamem_map(sc->sc_dmat,
	    &re_ks_sc->re_ks_sc_seg, re_ks_sc->re_ks_sc_nsegs,
	    sizeof(struct re_stats), (caddr_t *)&re_ks_sc->re_ks_sc_stats,
	    BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot map counter dma memory\n",
		    sc->sc_dev.dv_xname);
		goto freedma;
	}

	if (bus_dmamap_load(sc->sc_dmat, re_ks_sc->re_ks_sc_map,
	    (caddr_t)re_ks_sc->re_ks_sc_stats, sizeof(struct re_stats),
	    NULL, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot load counter dma memory\n",
		    sc->sc_dev.dv_xname);
		goto unmap;
	}

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL) {
		printf("%s: cannot create re-stats kstat\n",
		    sc->sc_dev.dv_xname);
		goto unload;
	}

	ks->ks_datalen = sizeof(re_kstats_tpl);

	rw_init(&re_ks_sc->re_ks_sc_rwl, "restats");
	kstat_set_wlock(ks, &re_ks_sc->re_ks_sc_rwl);
	ks->ks_softc = sc;
	ks->ks_ptr = re_ks_sc;
	ks->ks_read = re_kstat_read;
	ks->ks_copy = re_kstat_copy;

	kstat_install(ks);

	sc->rl_kstat = ks;

	return;

unload:
	bus_dmamap_unload(sc->sc_dmat, re_ks_sc->re_ks_sc_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)re_ks_sc->re_ks_sc_stats, sizeof(struct re_stats));
freedma:
	bus_dmamem_free(sc->sc_dmat, &re_ks_sc->re_ks_sc_seg,
	    re_ks_sc->re_ks_sc_nsegs);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, re_ks_sc->re_ks_sc_map);
free:
	free(re_ks_sc, M_DEVBUF, sizeof(*re_ks_sc));
}

void
re_kstat_detach(struct rl_softc *sc)
{
	struct kstat *ks = sc->rl_kstat;
	struct re_kstat_softc *re_ks_sc;

	if (ks == NULL)
		return;

	kstat_remove(ks);
	re_ks_sc = ks->ks_ptr;
	kstat_destroy(ks);

	bus_dmamap_unload(sc->sc_dmat, re_ks_sc->re_ks_sc_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)re_ks_sc->re_ks_sc_stats, sizeof(struct re_stats));
	bus_dmamem_free(sc->sc_dmat, &re_ks_sc->re_ks_sc_seg,
	    re_ks_sc->re_ks_sc_nsegs);
	bus_dmamap_destroy(sc->sc_dmat, re_ks_sc->re_ks_sc_map);
	free(re_ks_sc, M_DEVBUF, sizeof(*re_ks_sc));
}
#endif /* NKSTAT > 0 */