1 /* $OpenBSD: re.c,v 1.154 2014/07/08 05:35:18 dlg Exp $ */ 2 /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 /* 36 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * This driver is designed to support RealTek's next generation of 45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 48 * 49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 50 * with the older 8139 family, however it also supports a special 51 * C+ mode of operation that provides several new performance enhancing 52 * features. These include: 53 * 54 * o Descriptor based DMA mechanism. Each descriptor represents 55 * a single packet fragment. Data buffers may be aligned on 56 * any byte boundary. 57 * 58 * o 64-bit DMA 59 * 60 * o TCP/IP checksum offload for both RX and TX 61 * 62 * o High and normal priority transmit DMA rings 63 * 64 * o VLAN tag insertion and extraction 65 * 66 * o TCP large send (segmentation offload) 67 * 68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 69 * programming API is fairly straightforward. The RX filtering, EEPROM 70 * access and PHY access is the same as it is on the older 8139 series 71 * chips. 72 * 73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 74 * same programming API and feature set as the 8139C+ with the following 75 * differences and additions: 76 * 77 * o 1000Mbps mode 78 * 79 * o Jumbo frames 80 * 81 * o GMII and TBI ports/registers for interfacing with copper 82 * or fiber PHYs 83 * 84 * o RX and TX DMA rings can have up to 1024 descriptors 85 * (the 8139C+ allows a maximum of 64) 86 * 87 * o Slight differences in register layout from the 8139C+ 88 * 89 * The TX start and timer interrupt registers are at different locations 90 * on the 8169 than they are on the 8139C+. 
Also, the status word in the 91 * RX descriptor has a slightly different bit layout. The 8169 does not 92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 93 * copper gigE PHY. 94 * 95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 96 * (the 'S' stands for 'single-chip'). These devices have the same 97 * programming API as the older 8169, but also have some vendor-specific 98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 99 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 100 * 101 * This driver takes advantage of the RX and TX checksum offload and 102 * VLAN tag insertion/extraction features. It also implements TX 103 * interrupt moderation using the timer interrupt registers, which 104 * significantly reduces TX interrupt load. There is also support 105 * for jumbo frames, however the 8169/8169S/8110S can not transmit 106 * jumbo frames larger than 7440, so the max MTU possible with this 107 * driver is 7422 bytes. 
108 */ 109 110 #include "bpfilter.h" 111 #include "vlan.h" 112 113 #include <sys/param.h> 114 #include <sys/endian.h> 115 #include <sys/systm.h> 116 #include <sys/sockio.h> 117 #include <sys/mbuf.h> 118 #include <sys/malloc.h> 119 #include <sys/kernel.h> 120 #include <sys/device.h> 121 #include <sys/timeout.h> 122 #include <sys/socket.h> 123 124 #include <net/if.h> 125 #include <net/if_dl.h> 126 #include <net/if_media.h> 127 128 #ifdef INET 129 #include <netinet/in.h> 130 #include <netinet/in_systm.h> 131 #include <netinet/ip.h> 132 #include <netinet/if_ether.h> 133 #endif 134 135 #if NVLAN > 0 136 #include <net/if_types.h> 137 #include <net/if_vlan_var.h> 138 #endif 139 140 #if NBPFILTER > 0 141 #include <net/bpf.h> 142 #endif 143 144 #include <dev/mii/mii.h> 145 #include <dev/mii/miivar.h> 146 147 #include <dev/pci/pcireg.h> 148 #include <dev/pci/pcivar.h> 149 150 #include <dev/ic/rtl81x9reg.h> 151 #include <dev/ic/revar.h> 152 153 #ifdef RE_DEBUG 154 int redebug = 0; 155 #define DPRINTF(x) do { if (redebug) printf x; } while (0) 156 #else 157 #define DPRINTF(x) 158 #endif 159 160 static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); 161 162 int re_encap(struct rl_softc *, struct mbuf *, int *); 163 164 int re_newbuf(struct rl_softc *); 165 int re_rx_list_init(struct rl_softc *); 166 void re_rx_list_fill(struct rl_softc *); 167 int re_tx_list_init(struct rl_softc *); 168 int re_rxeof(struct rl_softc *); 169 int re_txeof(struct rl_softc *); 170 void re_tick(void *); 171 void re_start(struct ifnet *); 172 int re_ioctl(struct ifnet *, u_long, caddr_t); 173 void re_watchdog(struct ifnet *); 174 int re_ifmedia_upd(struct ifnet *); 175 void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176 177 void re_eeprom_putbyte(struct rl_softc *, int); 178 void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); 179 void re_read_eeprom(struct rl_softc *, caddr_t, int, int); 180 181 int re_gmii_readreg(struct device *, int, int); 182 void 
re_gmii_writereg(struct device *, int, int, int); 183 184 int re_miibus_readreg(struct device *, int, int); 185 void re_miibus_writereg(struct device *, int, int, int); 186 void re_miibus_statchg(struct device *); 187 188 void re_iff(struct rl_softc *); 189 190 void re_setup_hw_im(struct rl_softc *); 191 void re_setup_sim_im(struct rl_softc *); 192 void re_disable_hw_im(struct rl_softc *); 193 void re_disable_sim_im(struct rl_softc *); 194 void re_config_imtype(struct rl_softc *, int); 195 void re_setup_intr(struct rl_softc *, int, int); 196 #ifndef SMALL_KERNEL 197 int re_wol(struct ifnet*, int); 198 #endif 199 200 struct cfdriver re_cd = { 201 0, "re", DV_IFNET 202 }; 203 204 #define EE_SET(x) \ 205 CSR_WRITE_1(sc, RL_EECMD, \ 206 CSR_READ_1(sc, RL_EECMD) | x) 207 208 #define EE_CLR(x) \ 209 CSR_WRITE_1(sc, RL_EECMD, \ 210 CSR_READ_1(sc, RL_EECMD) & ~x) 211 212 static const struct re_revision { 213 u_int32_t re_chipid; 214 const char *re_name; 215 } re_revisions[] = { 216 { RL_HWREV_8100, "RTL8100" }, 217 { RL_HWREV_8100E, "RTL8100E" }, 218 { RL_HWREV_8100E_SPIN2, "RTL8100E 2" }, 219 { RL_HWREV_8101, "RTL8101" }, 220 { RL_HWREV_8101E, "RTL8101E" }, 221 { RL_HWREV_8102E, "RTL8102E" }, 222 { RL_HWREV_8106E, "RTL8106E" }, 223 { RL_HWREV_8401E, "RTL8401E" }, 224 { RL_HWREV_8402, "RTL8402" }, 225 { RL_HWREV_8411, "RTL8411" }, 226 { RL_HWREV_8411B, "RTL8411B" }, 227 { RL_HWREV_8102EL, "RTL8102EL" }, 228 { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" }, 229 { RL_HWREV_8103E, "RTL8103E" }, 230 { RL_HWREV_8110S, "RTL8110S" }, 231 { RL_HWREV_8139CPLUS, "RTL8139C+" }, 232 { RL_HWREV_8168B_SPIN1, "RTL8168 1" }, 233 { RL_HWREV_8168B_SPIN2, "RTL8168 2" }, 234 { RL_HWREV_8168B_SPIN3, "RTL8168 3" }, 235 { RL_HWREV_8168C, "RTL8168C/8111C" }, 236 { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" }, 237 { RL_HWREV_8168CP, "RTL8168CP/8111CP" }, 238 { RL_HWREV_8168F, "RTL8168F/8111F" }, 239 { RL_HWREV_8168G, "RTL8168G/8111G" }, 240 { RL_HWREV_8168GU, "RTL8168GU/8111GU" }, 241 { RL_HWREV_8105E, 
"RTL8105E" }, 242 { RL_HWREV_8105E_SPIN1, "RTL8105E" }, 243 { RL_HWREV_8168D, "RTL8168D/8111D" }, 244 { RL_HWREV_8168DP, "RTL8168DP/8111DP" }, 245 { RL_HWREV_8168E, "RTL8168E/8111E" }, 246 { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" }, 247 { RL_HWREV_8168EP, "RTL8168EP/8111EP" }, 248 { RL_HWREV_8169, "RTL8169" }, 249 { RL_HWREV_8169_8110SB, "RTL8169/8110SB" }, 250 { RL_HWREV_8169_8110SBL, "RTL8169SBL" }, 251 { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" }, 252 { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" }, 253 { RL_HWREV_8169S, "RTL8169S" }, 254 255 { 0, NULL } 256 }; 257 258 259 static inline void 260 re_set_bufaddr(struct rl_desc *d, bus_addr_t addr) 261 { 262 d->rl_bufaddr_lo = htole32((uint32_t)addr); 263 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 264 d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32); 265 else 266 d->rl_bufaddr_hi = 0; 267 } 268 269 /* 270 * Send a read command and address to the EEPROM, check for ACK. 271 */ 272 void 273 re_eeprom_putbyte(struct rl_softc *sc, int addr) 274 { 275 int d, i; 276 277 d = addr | (RL_9346_READ << sc->rl_eewidth); 278 279 /* 280 * Feed in each bit and strobe the clock. 281 */ 282 283 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 284 if (d & i) 285 EE_SET(RL_EE_DATAIN); 286 else 287 EE_CLR(RL_EE_DATAIN); 288 DELAY(100); 289 EE_SET(RL_EE_CLK); 290 DELAY(150); 291 EE_CLR(RL_EE_CLK); 292 DELAY(100); 293 } 294 } 295 296 /* 297 * Read a word of data stored in the EEPROM at address 'addr.' 298 */ 299 void 300 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 301 { 302 int i; 303 u_int16_t word = 0; 304 305 /* 306 * Send address of word we want to read. 307 */ 308 re_eeprom_putbyte(sc, addr); 309 310 /* 311 * Start reading bits from EEPROM. 
312 */ 313 for (i = 0x8000; i; i >>= 1) { 314 EE_SET(RL_EE_CLK); 315 DELAY(100); 316 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 317 word |= i; 318 EE_CLR(RL_EE_CLK); 319 DELAY(100); 320 } 321 322 *dest = word; 323 } 324 325 /* 326 * Read a sequence of words from the EEPROM. 327 */ 328 void 329 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 330 { 331 int i; 332 u_int16_t word = 0, *ptr; 333 334 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 335 336 DELAY(100); 337 338 for (i = 0; i < cnt; i++) { 339 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 340 re_eeprom_getword(sc, off + i, &word); 341 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 342 ptr = (u_int16_t *)(dest + (i * 2)); 343 *ptr = word; 344 } 345 346 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 347 } 348 349 int 350 re_gmii_readreg(struct device *self, int phy, int reg) 351 { 352 struct rl_softc *sc = (struct rl_softc *)self; 353 u_int32_t rval; 354 int i; 355 356 if (phy != 7) 357 return (0); 358 359 /* Let the rgephy driver read the GMEDIASTAT register */ 360 361 if (reg == RL_GMEDIASTAT) { 362 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 363 return (rval); 364 } 365 366 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 367 368 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 369 rval = CSR_READ_4(sc, RL_PHYAR); 370 if (rval & RL_PHYAR_BUSY) 371 break; 372 DELAY(25); 373 } 374 375 if (i == RL_PHY_TIMEOUT) { 376 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); 377 return (0); 378 } 379 380 DELAY(20); 381 382 return (rval & RL_PHYAR_PHYDATA); 383 } 384 385 void 386 re_gmii_writereg(struct device *dev, int phy, int reg, int data) 387 { 388 struct rl_softc *sc = (struct rl_softc *)dev; 389 u_int32_t rval; 390 int i; 391 392 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 393 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 394 395 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 396 rval = CSR_READ_4(sc, RL_PHYAR); 397 if (!(rval & RL_PHYAR_BUSY)) 398 break; 399 DELAY(25); 400 } 401 402 if (i == RL_PHY_TIMEOUT) 403 printf ("%s: PHY write 
failed\n", sc->sc_dev.dv_xname); 404 405 DELAY(20); 406 } 407 408 int 409 re_miibus_readreg(struct device *dev, int phy, int reg) 410 { 411 struct rl_softc *sc = (struct rl_softc *)dev; 412 u_int16_t rval = 0; 413 u_int16_t re8139_reg = 0; 414 int s; 415 416 s = splnet(); 417 418 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 419 rval = re_gmii_readreg(dev, phy, reg); 420 splx(s); 421 return (rval); 422 } 423 424 /* Pretend the internal PHY is only at address 0 */ 425 if (phy) { 426 splx(s); 427 return (0); 428 } 429 switch(reg) { 430 case MII_BMCR: 431 re8139_reg = RL_BMCR; 432 break; 433 case MII_BMSR: 434 re8139_reg = RL_BMSR; 435 break; 436 case MII_ANAR: 437 re8139_reg = RL_ANAR; 438 break; 439 case MII_ANER: 440 re8139_reg = RL_ANER; 441 break; 442 case MII_ANLPAR: 443 re8139_reg = RL_LPAR; 444 break; 445 case MII_PHYIDR1: 446 case MII_PHYIDR2: 447 splx(s); 448 return (0); 449 /* 450 * Allow the rlphy driver to read the media status 451 * register. If we have a link partner which does not 452 * support NWAY, this is the register which will tell 453 * us the results of parallel detection. 454 */ 455 case RL_MEDIASTAT: 456 rval = CSR_READ_1(sc, RL_MEDIASTAT); 457 splx(s); 458 return (rval); 459 default: 460 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 461 splx(s); 462 return (0); 463 } 464 rval = CSR_READ_2(sc, re8139_reg); 465 if (re8139_reg == RL_BMCR) { 466 /* 8139C+ has different bit layout. 
*/ 467 rval &= ~(BMCR_LOOP | BMCR_ISO); 468 } 469 splx(s); 470 return (rval); 471 } 472 473 void 474 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 475 { 476 struct rl_softc *sc = (struct rl_softc *)dev; 477 u_int16_t re8139_reg = 0; 478 int s; 479 480 s = splnet(); 481 482 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 483 re_gmii_writereg(dev, phy, reg, data); 484 splx(s); 485 return; 486 } 487 488 /* Pretend the internal PHY is only at address 0 */ 489 if (phy) { 490 splx(s); 491 return; 492 } 493 switch(reg) { 494 case MII_BMCR: 495 re8139_reg = RL_BMCR; 496 /* 8139C+ has different bit layout. */ 497 data &= ~(BMCR_LOOP | BMCR_ISO); 498 break; 499 case MII_BMSR: 500 re8139_reg = RL_BMSR; 501 break; 502 case MII_ANAR: 503 re8139_reg = RL_ANAR; 504 break; 505 case MII_ANER: 506 re8139_reg = RL_ANER; 507 break; 508 case MII_ANLPAR: 509 re8139_reg = RL_LPAR; 510 break; 511 case MII_PHYIDR1: 512 case MII_PHYIDR2: 513 splx(s); 514 return; 515 break; 516 default: 517 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 518 splx(s); 519 return; 520 } 521 CSR_WRITE_2(sc, re8139_reg, data); 522 splx(s); 523 } 524 525 void 526 re_miibus_statchg(struct device *dev) 527 { 528 } 529 530 void 531 re_iff(struct rl_softc *sc) 532 { 533 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 534 int h = 0; 535 u_int32_t hashes[2]; 536 u_int32_t rxfilt; 537 struct arpcom *ac = &sc->sc_arpcom; 538 struct ether_multi *enm; 539 struct ether_multistep step; 540 541 rxfilt = CSR_READ_4(sc, RL_RXCFG); 542 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 543 RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); 544 ifp->if_flags &= ~IFF_ALLMULTI; 545 546 /* 547 * Always accept frames destined to our station address. 548 * Always accept broadcast frames. 
549 */ 550 rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 551 552 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 553 ifp->if_flags |= IFF_ALLMULTI; 554 rxfilt |= RL_RXCFG_RX_MULTI; 555 if (ifp->if_flags & IFF_PROMISC) 556 rxfilt |= RL_RXCFG_RX_ALLPHYS; 557 hashes[0] = hashes[1] = 0xFFFFFFFF; 558 } else { 559 rxfilt |= RL_RXCFG_RX_MULTI; 560 /* Program new filter. */ 561 bzero(hashes, sizeof(hashes)); 562 563 ETHER_FIRST_MULTI(step, ac, enm); 564 while (enm != NULL) { 565 h = ether_crc32_be(enm->enm_addrlo, 566 ETHER_ADDR_LEN) >> 26; 567 568 if (h < 32) 569 hashes[0] |= (1 << h); 570 else 571 hashes[1] |= (1 << (h - 32)); 572 573 ETHER_NEXT_MULTI(step, enm); 574 } 575 } 576 577 /* 578 * For some unfathomable reason, RealTek decided to reverse 579 * the order of the multicast hash registers in the PCI Express 580 * parts. This means we have to write the hash pattern in reverse 581 * order for those devices. 582 */ 583 if (sc->rl_flags & RL_FLAG_INVMAR) { 584 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 585 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 586 } else { 587 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 588 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 589 } 590 591 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 592 } 593 594 void 595 re_reset(struct rl_softc *sc) 596 { 597 int i; 598 599 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 600 601 for (i = 0; i < RL_TIMEOUT; i++) { 602 DELAY(10); 603 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 604 break; 605 } 606 if (i == RL_TIMEOUT) 607 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 608 609 if (sc->rl_flags & RL_FLAG_MACLDPS) 610 CSR_WRITE_1(sc, RL_LDPS, 1); 611 } 612 613 #ifdef __armish__ 614 /* 615 * Thecus N2100 doesn't store the full mac address in eeprom 616 * so we read the old mac address from the device before the reset 617 * in hopes that the proper mac address is already there. 
618 */ 619 union { 620 u_int32_t eaddr_word[2]; 621 u_char eaddr[ETHER_ADDR_LEN]; 622 } boot_eaddr; 623 int boot_eaddr_valid; 624 #endif /* __armish__ */ 625 /* 626 * Attach the interface. Allocate softc structures, do ifmedia 627 * setup and ethernet/BPF attach. 628 */ 629 int 630 re_attach(struct rl_softc *sc, const char *intrstr) 631 { 632 u_char eaddr[ETHER_ADDR_LEN]; 633 u_int16_t as[ETHER_ADDR_LEN / 2]; 634 struct ifnet *ifp; 635 u_int16_t re_did = 0; 636 int error = 0, i; 637 const struct re_revision *rr; 638 const char *re_name = NULL; 639 640 sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 641 642 switch (sc->sc_hwrev) { 643 case RL_HWREV_8139CPLUS: 644 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_AUTOPAD; 645 break; 646 case RL_HWREV_8100E: 647 case RL_HWREV_8100E_SPIN2: 648 case RL_HWREV_8101E: 649 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 650 RL_FLAG_PHYWAKE; 651 break; 652 case RL_HWREV_8103E: 653 sc->rl_flags |= RL_FLAG_MACSLEEP; 654 /* FALLTHROUGH */ 655 case RL_HWREV_8102E: 656 case RL_HWREV_8102EL: 657 case RL_HWREV_8102EL_SPIN1: 658 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 659 RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 660 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 661 break; 662 case RL_HWREV_8401E: 663 case RL_HWREV_8402: 664 case RL_HWREV_8105E: 665 case RL_HWREV_8105E_SPIN1: 666 case RL_HWREV_8106E: 667 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 668 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 669 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 670 RL_FLAG_NOJUMBO; 671 break; 672 case RL_HWREV_8168B_SPIN1: 673 case RL_HWREV_8168B_SPIN2: 674 case RL_HWREV_8168B_SPIN3: 675 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 676 RL_FLAG_MACSTAT | RL_FLAG_HWIM; 677 break; 678 case RL_HWREV_8168C_SPIN2: 679 sc->rl_flags |= RL_FLAG_MACSLEEP; 680 /* FALLTHROUGH */ 681 case RL_HWREV_8168C: 682 case RL_HWREV_8168CP: 683 case RL_HWREV_8168DP: 684 sc->rl_flags |= RL_FLAG_INVMAR | 
RL_FLAG_PHYWAKE | 685 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 686 RL_FLAG_HWIM | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 687 /* 688 * These controllers support jumbo frame but it seems 689 * that enabling it requires touching additional magic 690 * registers. Depending on MAC revisions some 691 * controllers need to disable checksum offload. So 692 * disable jumbo frame until I have better idea what 693 * it really requires to make it support. 694 * RTL8168C/CP : supports up to 6KB jumbo frame. 695 * RTL8111C/CP : supports up to 9KB jumbo frame. 696 */ 697 sc->rl_flags |= RL_FLAG_NOJUMBO; 698 break; 699 case RL_HWREV_8168D: 700 case RL_HWREV_8168E: 701 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 702 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 703 RL_FLAG_MACSTAT | RL_FLAG_HWIM | RL_FLAG_CMDSTOP | 704 RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 705 break; 706 case RL_HWREV_8168E_VL: 707 case RL_HWREV_8168F: 708 sc->rl_flags |= RL_FLAG_EARLYOFF; 709 /* FALLTHROUGH */ 710 case RL_HWREV_8411: 711 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 712 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 713 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 714 break; 715 case RL_HWREV_8168EP: 716 case RL_HWREV_8168G: 717 case RL_HWREV_8411B: 718 case RL_HWREV_8168GU: 719 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 720 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 721 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO | 722 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 723 break; 724 case RL_HWREV_8169_8110SB: 725 case RL_HWREV_8169_8110SBL: 726 case RL_HWREV_8169_8110SCd: 727 case RL_HWREV_8169_8110SCe: 728 sc->rl_flags |= RL_FLAG_PHYWAKE; 729 /* FALLTHROUGH */ 730 case RL_HWREV_8169: 731 case RL_HWREV_8169S: 732 case RL_HWREV_8110S: 733 sc->rl_flags |= RL_FLAG_MACLDPS; 734 break; 735 default: 736 break; 737 } 738 739 /* Reset the adapter. 
*/ 740 re_reset(sc); 741 742 sc->rl_tx_time = 5; /* 125us */ 743 sc->rl_rx_time = 2; /* 50us */ 744 if (sc->rl_flags & RL_FLAG_PCIE) 745 sc->rl_sim_time = 75; /* 75us */ 746 else 747 sc->rl_sim_time = 125; /* 125us */ 748 sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */ 749 750 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 751 sc->rl_bus_speed = 33; /* XXX */ 752 else if (sc->rl_flags & RL_FLAG_PCIE) 753 sc->rl_bus_speed = 125; 754 else { 755 u_int8_t cfg2; 756 757 cfg2 = CSR_READ_1(sc, RL_CFG2); 758 switch (cfg2 & RL_CFG2_PCI_MASK) { 759 case RL_CFG2_PCI_33MHZ: 760 sc->rl_bus_speed = 33; 761 break; 762 case RL_CFG2_PCI_66MHZ: 763 sc->rl_bus_speed = 66; 764 break; 765 default: 766 printf("%s: unknown bus speed, assume 33MHz\n", 767 sc->sc_dev.dv_xname); 768 sc->rl_bus_speed = 33; 769 break; 770 } 771 772 if (cfg2 & RL_CFG2_PCI_64BIT) 773 sc->rl_flags |= RL_FLAG_PCI64; 774 } 775 776 re_config_imtype(sc, sc->rl_imtype); 777 778 if (sc->rl_flags & RL_FLAG_PAR) { 779 /* 780 * XXX Should have a better way to extract station 781 * address from EEPROM. 782 */ 783 for (i = 0; i < ETHER_ADDR_LEN; i++) 784 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 785 } else { 786 sc->rl_eewidth = RL_9356_ADDR_LEN; 787 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 788 if (re_did != 0x8129) 789 sc->rl_eewidth = RL_9346_ADDR_LEN; 790 791 /* 792 * Get station address from the EEPROM. 793 */ 794 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 795 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 796 as[i] = letoh16(as[i]); 797 bcopy(as, eaddr, sizeof(eaddr)); 798 799 #ifdef __armish__ 800 /* 801 * On the Thecus N2100, the MAC address in the EEPROM is 802 * always 00:14:fd:10:00:00. The proper MAC address is 803 * stored in flash. Fortunately RedBoot configures the 804 * proper MAC address (for the first onboard interface) 805 * which we can read from the IDR. 
806 */ 807 if (eaddr[0] == 0x00 && eaddr[1] == 0x14 && 808 eaddr[2] == 0xfd && eaddr[3] == 0x10 && 809 eaddr[4] == 0x00 && eaddr[5] == 0x00) { 810 if (boot_eaddr_valid == 0) { 811 boot_eaddr.eaddr_word[1] = 812 letoh32(CSR_READ_4(sc, RL_IDR4)); 813 boot_eaddr.eaddr_word[0] = 814 letoh32(CSR_READ_4(sc, RL_IDR0)); 815 boot_eaddr_valid = 1; 816 } 817 818 bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr)); 819 eaddr[5] += sc->sc_dev.dv_unit; 820 } 821 #endif 822 } 823 824 /* 825 * Set RX length mask, TX poll request register 826 * and TX descriptor count. 827 */ 828 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 829 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 830 sc->rl_txstart = RL_TXSTART; 831 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139; 832 } else { 833 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 834 sc->rl_txstart = RL_GTXSTART; 835 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169; 836 } 837 838 bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 839 840 for (rr = re_revisions; rr->re_name != NULL; rr++) { 841 if (rr->re_chipid == sc->sc_hwrev) 842 re_name = rr->re_name; 843 } 844 845 if (re_name == NULL) 846 printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); 847 else 848 printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); 849 850 printf(", %s, address %s\n", intrstr, 851 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 852 853 if (sc->rl_ldata.rl_tx_desc_cnt > 854 PAGE_SIZE / sizeof(struct rl_desc)) { 855 sc->rl_ldata.rl_tx_desc_cnt = 856 PAGE_SIZE / sizeof(struct rl_desc); 857 } 858 859 /* Allocate DMA'able memory for the TX ring */ 860 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), 861 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, 862 &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | 863 BUS_DMA_ZERO)) != 0) { 864 printf("%s: can't allocate tx listseg, error = %d\n", 865 sc->sc_dev.dv_xname, error); 866 goto fail_0; 867 } 868 869 /* Load the map for the TX ring. 
*/ 870 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, 871 sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), 872 (caddr_t *)&sc->rl_ldata.rl_tx_list, 873 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 874 printf("%s: can't map tx list, error = %d\n", 875 sc->sc_dev.dv_xname, error); 876 goto fail_1; 877 } 878 879 if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, 880 RL_TX_LIST_SZ(sc), 0, 0, 881 &sc->rl_ldata.rl_tx_list_map)) != 0) { 882 printf("%s: can't create tx list map, error = %d\n", 883 sc->sc_dev.dv_xname, error); 884 goto fail_2; 885 } 886 887 if ((error = bus_dmamap_load(sc->sc_dmat, 888 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 889 RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 890 printf("%s: can't load tx list, error = %d\n", 891 sc->sc_dev.dv_xname, error); 892 goto fail_3; 893 } 894 895 /* Create DMA maps for TX buffers */ 896 for (i = 0; i < RL_TX_QLEN; i++) { 897 error = bus_dmamap_create(sc->sc_dmat, 898 RL_JUMBO_FRAMELEN, RL_NTXSEGS, RL_JUMBO_FRAMELEN, 899 0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap); 900 if (error) { 901 printf("%s: can't create DMA map for TX\n", 902 sc->sc_dev.dv_xname); 903 goto fail_4; 904 } 905 } 906 907 /* Allocate DMA'able memory for the RX ring */ 908 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ, 909 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, 910 &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | 911 BUS_DMA_ZERO)) != 0) { 912 printf("%s: can't allocate rx listnseg, error = %d\n", 913 sc->sc_dev.dv_xname, error); 914 goto fail_4; 915 } 916 917 /* Load the map for the RX ring. 
*/ 918 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg, 919 sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ, 920 (caddr_t *)&sc->rl_ldata.rl_rx_list, 921 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 922 printf("%s: can't map rx list, error = %d\n", 923 sc->sc_dev.dv_xname, error); 924 goto fail_5; 925 926 } 927 928 if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1, 929 RL_RX_DMAMEM_SZ, 0, 0, 930 &sc->rl_ldata.rl_rx_list_map)) != 0) { 931 printf("%s: can't create rx list map, error = %d\n", 932 sc->sc_dev.dv_xname, error); 933 goto fail_6; 934 } 935 936 if ((error = bus_dmamap_load(sc->sc_dmat, 937 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 938 RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) { 939 printf("%s: can't load rx list, error = %d\n", 940 sc->sc_dev.dv_xname, error); 941 goto fail_7; 942 } 943 944 /* Create DMA maps for RX buffers */ 945 for (i = 0; i < RL_RX_DESC_CNT; i++) { 946 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 947 0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 948 if (error) { 949 printf("%s: can't create DMA map for RX\n", 950 sc->sc_dev.dv_xname); 951 goto fail_8; 952 } 953 } 954 955 ifp = &sc->sc_arpcom.ac_if; 956 ifp->if_softc = sc; 957 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 958 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 959 ifp->if_ioctl = re_ioctl; 960 ifp->if_start = re_start; 961 ifp->if_watchdog = re_watchdog; 962 if ((sc->rl_flags & RL_FLAG_NOJUMBO) == 0) 963 ifp->if_hardmtu = RL_JUMBO_MTU; 964 IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN); 965 IFQ_SET_READY(&ifp->if_snd); 966 967 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 | 968 IFCAP_CSUM_UDPv4; 969 970 /* 971 * RTL8168/8111C generates wrong IP checksummed frame if the 972 * packet has IP options so disable TX IP checksum offloading. 
973 */ 974 switch (sc->sc_hwrev) { 975 case RL_HWREV_8168C: 976 case RL_HWREV_8168C_SPIN2: 977 case RL_HWREV_8168CP: 978 break; 979 default: 980 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 981 } 982 983 #if NVLAN > 0 984 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 985 #endif 986 987 #ifndef SMALL_KERNEL 988 ifp->if_capabilities |= IFCAP_WOL; 989 ifp->if_wol = re_wol; 990 re_wol(ifp, 0); 991 #endif 992 timeout_set(&sc->timer_handle, re_tick, sc); 993 994 /* Take PHY out of power down mode. */ 995 if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) { 996 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 997 if (sc->sc_hwrev == RL_HWREV_8401E) 998 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 999 } 1000 if (sc->rl_flags & RL_FLAG_PHYWAKE) { 1001 re_gmii_writereg((struct device *)sc, 1, 0x1f, 0); 1002 re_gmii_writereg((struct device *)sc, 1, 0x0e, 0); 1003 } 1004 1005 /* Do MII setup */ 1006 sc->sc_mii.mii_ifp = ifp; 1007 sc->sc_mii.mii_readreg = re_miibus_readreg; 1008 sc->sc_mii.mii_writereg = re_miibus_writereg; 1009 sc->sc_mii.mii_statchg = re_miibus_statchg; 1010 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd, 1011 re_ifmedia_sts); 1012 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1013 MII_OFFSET_ANY, MIIF_DOPAUSE); 1014 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1015 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 1016 ifmedia_add(&sc->sc_mii.mii_media, 1017 IFM_ETHER|IFM_NONE, 0, NULL); 1018 ifmedia_set(&sc->sc_mii.mii_media, 1019 IFM_ETHER|IFM_NONE); 1020 } else 1021 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1022 1023 /* 1024 * Call MI attach routine. 1025 */ 1026 re_reset(sc); 1027 if_attach(ifp); 1028 ether_ifattach(ifp); 1029 1030 return (0); 1031 1032 fail_8: 1033 /* Destroy DMA maps for RX buffers. 
*/
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

/*
 * Attach a fresh mbuf cluster to the RX descriptor at the current
 * producer index and hand the descriptor to the chip.
 *
 * Returns 0 on success, or ENOBUFS if no cluster is available, the
 * DMA load fails, or the descriptor is unexpectedly still owned by
 * the hardware.  On success the producer index is advanced.
 */
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN;
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Sanity check: the descriptor we are about to fill must be ours. */
	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RL_RX_DESC_CNT - 1))
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * Two-phase update: write the fully initialized descriptor
	 * (address, length, EOR) and sync it out BEFORE setting the
	 * OWN bit in a second write, so the chip can never observe a
	 * half-initialized descriptor that it already owns.
	 */
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}

/*
 * Reset the TX ring to a pristine state: clear all descriptors,
 * drop any stale mbuf pointers and reset the queue indexes and
 * free-descriptor count.  Always returns 0.
 */
int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < RL_TX_QLEN; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc);
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

/*
 * Reset the RX ring state and repopulate it with fresh buffers.
 * Always returns 0.
 */
int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2, RL_RX_DESC_CNT);
	re_rx_list_fill(sc);

	return (0);
}

/*
 * Fill as many free RX ring slots as the rxring accounting allows,
 * returning any slots we could not fill (out of clusters) back to
 * the ring counter.
 */
void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring, RL_RX_DESC_CNT);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 *
 * Returns nonzero if at least one descriptor was consumed.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		/* Chip still owns this descriptor: nothing more to do. */
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * No EOF: this buffer is one fragment of a jumbo frame;
		 * chain it onto rl_head/rl_tail and keep collecting.
		 */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			continue;
		}

		/*
		 * Last fragment of a multi-buffer frame: fix up the
		 * length of the final mbuf and strip the trailing CRC.
		 */
		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}

/*
 * Reclaim completed TX queue entries: free transmitted mbufs, unload
 * their DMA maps and update the interface error/packet counters.
 * Returns nonzero if at least one packet completed.
 */
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx, tx = 0;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		if (txq->txq_mbuf == NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_prodidx);
			break;
		}

		/* Only the last (EOF) descriptor's status is checked. */
		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		tx = 1;
		sc->rl_ldata.rl_tx_free += txq->txq_nsegs;
		KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->rl_ldata.rl_txq_considx = idx;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (tx);
}

/*
 * One-second periodic timer: drive the MII state machine, track link
 * state in RL_FLAG_LINK, and kick the transmit queue when the link
 * comes up.  Re-arms itself via timeout_add_sec().
 */
void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	struct ifnet	*ifp;
	int s;

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);
	if (sc->rl_flags & RL_FLAG_LINK) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_flags &= ~RL_FLAG_LINK;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_flags |= RL_FLAG_LINK;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				re_start(ifp);
		}
	}
	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

/*
 * Interrupt handler.  Acks and dispatches RX/TX/error/link events and
 * manages switching between direct and simulated (timer-based)
 * interrupt moderation.  Returns nonzero if the interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff.
*/
	if (status == 0xffff)
		return (0);
	/* Ack whatever we saw (ISR bits are cleared by writing them back). */
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
			claimed = 1;
		}

		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if ((sc->rl_flags & RL_FLAG_TIMERINTR)) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				tx = re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	if (tx && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}

/*
 * Map an outgoing mbuf chain into the TX ring, one descriptor per DMA
 * segment, and pass ownership of the packet to the chip.  On success
 * *idx is advanced to the next TX queue slot.  Returns 0 on success,
 * EFBIG if the ring does not have room, or an errno/ENOBUFS on DMA
 * mapping failure; on every failure path the DMA map is unloaded.
 */
int
re_encap(struct rl_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t	map;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;
	struct rl_txq	*txq;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take affect.
	 */

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX try to defrag if EFBIG? */
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	nsegs = map->dm_nsegs;
	pad = 0;
	/*
	 * Short frames with IP checksum offload on pre-V2 descriptors
	 * need an extra pad descriptor (see RL_TXPADDADDR below).
	 */
	if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 &&
	    m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN &&
	    (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) {
		pad = 1;
		nsegs++;
	}

	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		error = EFBIG;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			/* Roll back the descriptors filled so far. */
			for (; seg > 0; seg --) {
				uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) %
				    RL_TX_DESC_CNT(sc);
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_free -= nsegs;
	sc->rl_ldata.rl_tx_nextfree = curidx;

	*idx = RL_NEXT_TXQ(sc, *idx);

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}

/*
 * Main transmit routine for C+ and gigE NICs.
1717 */ 1718 1719 void 1720 re_start(struct ifnet *ifp) 1721 { 1722 struct rl_softc *sc; 1723 int idx, queued = 0; 1724 1725 sc = ifp->if_softc; 1726 1727 if (ifp->if_flags & IFF_OACTIVE) 1728 return; 1729 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1730 return; 1731 1732 idx = sc->rl_ldata.rl_txq_prodidx; 1733 for (;;) { 1734 struct mbuf *m; 1735 int error; 1736 1737 IFQ_POLL(&ifp->if_snd, m); 1738 if (m == NULL) 1739 break; 1740 1741 if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) { 1742 KASSERT(idx == sc->rl_ldata.rl_txq_considx); 1743 ifp->if_flags |= IFF_OACTIVE; 1744 break; 1745 } 1746 1747 error = re_encap(sc, m, &idx); 1748 if (error == EFBIG && 1749 sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) { 1750 IFQ_DEQUEUE(&ifp->if_snd, m); 1751 m_freem(m); 1752 ifp->if_oerrors++; 1753 continue; 1754 } 1755 if (error) { 1756 ifp->if_flags |= IFF_OACTIVE; 1757 break; 1758 } 1759 1760 IFQ_DEQUEUE(&ifp->if_snd, m); 1761 queued++; 1762 1763 #if NBPFILTER > 0 1764 /* 1765 * If there's a BPF listener, bounce a copy of this frame 1766 * to him. 1767 */ 1768 if (ifp->if_bpf) 1769 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1770 #endif 1771 } 1772 1773 if (queued == 0) 1774 return; 1775 1776 sc->rl_ldata.rl_txq_prodidx = idx; 1777 1778 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1779 1780 /* 1781 * Set a timeout in case the chip goes out to lunch. 1782 */ 1783 ifp->if_timer = 5; 1784 } 1785 1786 int 1787 re_init(struct ifnet *ifp) 1788 { 1789 struct rl_softc *sc = ifp->if_softc; 1790 u_int16_t cfg; 1791 uint32_t rxcfg; 1792 int s; 1793 union { 1794 u_int32_t align_dummy; 1795 u_char eaddr[ETHER_ADDR_LEN]; 1796 } eaddr; 1797 1798 s = splnet(); 1799 1800 /* 1801 * Cancel pending I/O and free all RX/TX buffers. 1802 */ 1803 re_stop(ifp); 1804 1805 /* 1806 * Enable C+ RX and TX mode, as well as VLAN stripping and 1807 * RX checksum offload. We must configure the C+ register 1808 * before all others. 
1809 */ 1810 cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW | 1811 RL_CPLUSCMD_RXCSUM_ENB; 1812 1813 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1814 cfg |= RL_CPLUSCMD_VLANSTRIP; 1815 1816 if (sc->rl_flags & RL_FLAG_MACSTAT) 1817 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1818 else 1819 cfg |= RL_CPLUSCMD_RXENB; 1820 1821 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1822 1823 /* 1824 * Init our MAC address. Even though the chipset 1825 * documentation doesn't mention it, we need to enter "Config 1826 * register write enable" mode to modify the ID registers. 1827 */ 1828 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 1829 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1830 CSR_WRITE_4(sc, RL_IDR4, 1831 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 1832 CSR_WRITE_4(sc, RL_IDR0, 1833 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 1834 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1835 1836 /* 1837 * For C+ mode, initialize the RX descriptors and mbufs. 1838 */ 1839 re_rx_list_init(sc); 1840 re_tx_list_init(sc); 1841 1842 /* 1843 * Load the addresses of the RX and TX lists into the chip. 1844 */ 1845 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1846 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1847 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1848 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1849 1850 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1851 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1852 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1853 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1854 1855 if (sc->rl_flags & RL_FLAG_RXDV_GATED) 1856 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 1857 ~0x00080000); 1858 1859 /* 1860 * Enable transmit and receive. 1861 */ 1862 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1863 1864 /* 1865 * Set the initial TX and RX configuration. 
1866 */ 1867 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1868 1869 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 1870 1871 rxcfg = RL_RXCFG_CONFIG; 1872 if (sc->rl_flags & RL_FLAG_EARLYOFF) 1873 rxcfg |= RL_RXCFG_EARLYOFF; 1874 else if (sc->rl_flags & RL_FLAG_EARLYOFFV2) 1875 rxcfg |= RL_RXCFG_EARLYOFFV2; 1876 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 1877 1878 /* Program promiscuous mode and multicast filters. */ 1879 re_iff(sc); 1880 1881 /* 1882 * Enable interrupts. 1883 */ 1884 re_setup_intr(sc, 1, sc->rl_imtype); 1885 CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype); 1886 1887 /* Start RX/TX process. */ 1888 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1889 #ifdef notdef 1890 /* Enable receiver and transmitter. */ 1891 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1892 #endif 1893 1894 /* 1895 * For 8169 gigE NICs, set the max allowed RX packet 1896 * size so we can receive jumbo frames. 1897 */ 1898 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 1899 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 1900 1901 mii_mediachg(&sc->sc_mii); 1902 1903 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 1904 1905 ifp->if_flags |= IFF_RUNNING; 1906 ifp->if_flags &= ~IFF_OACTIVE; 1907 1908 splx(s); 1909 1910 sc->rl_flags &= ~RL_FLAG_LINK; 1911 1912 timeout_add_sec(&sc->timer_handle, 1); 1913 1914 return (0); 1915 } 1916 1917 /* 1918 * Set media options. 1919 */ 1920 int 1921 re_ifmedia_upd(struct ifnet *ifp) 1922 { 1923 struct rl_softc *sc; 1924 1925 sc = ifp->if_softc; 1926 1927 return (mii_mediachg(&sc->sc_mii)); 1928 } 1929 1930 /* 1931 * Report current media status. 
 */
void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc	*sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Handle socket ioctls: interface address/flags changes and media
 * requests; everything else is passed to ether_ioctl().  An
 * ENETRESET result reprograms the RX filter via re_iff().
 */
int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc	*sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	struct ifaddr	*ifa = (struct ifaddr *)data;
	int		s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif /* INET */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				re_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			re_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Transmit watchdog: fires when a TX started by re_start() has not
 * completed within the if_timer deadline.  Reclaims what it can and
 * reinitializes the chip.
 */
void
re_watchdog(struct ifnet *ifp)
{
	struct rl_softc	*sc;
	int		s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	re_txeof(sc);
	re_rxeof(sc);

	re_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
2016 */ 2017 void 2018 re_stop(struct ifnet *ifp) 2019 { 2020 struct rl_softc *sc; 2021 int i; 2022 2023 sc = ifp->if_softc; 2024 2025 ifp->if_timer = 0; 2026 sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR); 2027 2028 timeout_del(&sc->timer_handle); 2029 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2030 2031 mii_down(&sc->sc_mii); 2032 2033 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2034 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2035 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2036 2037 if (sc->rl_head != NULL) { 2038 m_freem(sc->rl_head); 2039 sc->rl_head = sc->rl_tail = NULL; 2040 } 2041 2042 /* Free the TX list buffers. */ 2043 for (i = 0; i < RL_TX_QLEN; i++) { 2044 if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { 2045 bus_dmamap_unload(sc->sc_dmat, 2046 sc->rl_ldata.rl_txq[i].txq_dmamap); 2047 m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); 2048 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 2049 } 2050 } 2051 2052 /* Free the RX list buffers. */ 2053 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2054 if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { 2055 bus_dmamap_unload(sc->sc_dmat, 2056 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 2057 m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); 2058 sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; 2059 } 2060 } 2061 } 2062 2063 void 2064 re_setup_hw_im(struct rl_softc *sc) 2065 { 2066 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2067 2068 /* 2069 * Interrupt moderation 2070 * 2071 * 0xABCD 2072 * A - unknown (maybe TX related) 2073 * B - TX timer (unit: 25us) 2074 * C - unknown (maybe RX related) 2075 * D - RX timer (unit: 25us) 2076 * 2077 * 2078 * re(4)'s interrupt moderation is actually controlled by 2079 * two variables, like most other NICs (bge, bnx etc.) 
2080 * o timer 2081 * o number of packets [P] 2082 * 2083 * The logic relationship between these two variables is 2084 * similar to other NICs too: 2085 * if (timer expire || packets > [P]) 2086 * Interrupt is delivered 2087 * 2088 * Currently we only know how to set 'timer', but not 2089 * 'number of packets', which should be ~30, as far as I 2090 * tested (sink ~900Kpps, interrupt rate is 30KHz) 2091 */ 2092 CSR_WRITE_2(sc, RL_IM, 2093 RL_IM_RXTIME(sc->rl_rx_time) | 2094 RL_IM_TXTIME(sc->rl_tx_time) | 2095 RL_IM_MAGIC); 2096 } 2097 2098 void 2099 re_disable_hw_im(struct rl_softc *sc) 2100 { 2101 if (sc->rl_flags & RL_FLAG_HWIM) 2102 CSR_WRITE_2(sc, RL_IM, 0); 2103 } 2104 2105 void 2106 re_setup_sim_im(struct rl_softc *sc) 2107 { 2108 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2109 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */ 2110 else { 2111 u_int32_t ticks; 2112 2113 /* 2114 * Datasheet says tick decreases at bus speed, 2115 * but it seems the clock runs a little bit 2116 * faster, so we do some compensation here. 
2117 */ 2118 ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5; 2119 CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks); 2120 } 2121 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 2122 sc->rl_flags |= RL_FLAG_TIMERINTR; 2123 } 2124 2125 void 2126 re_disable_sim_im(struct rl_softc *sc) 2127 { 2128 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2129 CSR_WRITE_4(sc, RL_TIMERINT, 0); 2130 else 2131 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0); 2132 sc->rl_flags &= ~RL_FLAG_TIMERINTR; 2133 } 2134 2135 void 2136 re_config_imtype(struct rl_softc *sc, int imtype) 2137 { 2138 switch (imtype) { 2139 case RL_IMTYPE_HW: 2140 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2141 /* FALLTHROUGH */ 2142 case RL_IMTYPE_NONE: 2143 sc->rl_intrs = RL_INTRS_CPLUS; 2144 sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW | 2145 RL_ISR_RX_OVERRUN; 2146 sc->rl_tx_ack = RL_ISR_TX_OK; 2147 break; 2148 2149 case RL_IMTYPE_SIM: 2150 sc->rl_intrs = RL_INTRS_TIMER; 2151 sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED; 2152 sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED; 2153 break; 2154 2155 default: 2156 panic("%s: unknown imtype %d", 2157 sc->sc_dev.dv_xname, imtype); 2158 } 2159 } 2160 2161 void 2162 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype) 2163 { 2164 re_config_imtype(sc, imtype); 2165 2166 if (enable_intrs) 2167 CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); 2168 else 2169 CSR_WRITE_2(sc, RL_IMR, 0); 2170 2171 switch (imtype) { 2172 case RL_IMTYPE_NONE: 2173 re_disable_sim_im(sc); 2174 re_disable_hw_im(sc); 2175 break; 2176 2177 case RL_IMTYPE_HW: 2178 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2179 re_disable_sim_im(sc); 2180 re_setup_hw_im(sc); 2181 break; 2182 2183 case RL_IMTYPE_SIM: 2184 re_disable_hw_im(sc); 2185 re_setup_sim_im(sc); 2186 break; 2187 2188 default: 2189 panic("%s: unknown imtype %d", 2190 sc->sc_dev.dv_xname, imtype); 2191 } 2192 } 2193 2194 #ifndef SMALL_KERNEL 2195 int 2196 re_wol(struct ifnet *ifp, int enable) 2197 { 2198 struct rl_softc *sc = ifp->if_softc; 2199 int i; 2200 u_int8_t val; 2201 struct 
re_wolcfg { 2202 u_int8_t enable; 2203 u_int8_t reg; 2204 u_int8_t bit; 2205 } re_wolcfg[] = { 2206 /* Always disable all wake events expect magic packet. */ 2207 { 0, RL_CFG5, RL_CFG5_WOL_UCAST }, 2208 { 0, RL_CFG5, RL_CFG5_WOL_MCAST }, 2209 { 0, RL_CFG5, RL_CFG5_WOL_BCAST }, 2210 { 1, RL_CFG3, RL_CFG3_WOL_MAGIC }, 2211 { 0, RL_CFG3, RL_CFG3_WOL_LINK } 2212 }; 2213 2214 if (enable) { 2215 if ((CSR_READ_1(sc, RL_CFG1) & RL_CFG1_PME) == 0) { 2216 printf("%s: power management is disabled, " 2217 "cannot do WOL\n", sc->sc_dev.dv_xname); 2218 return (ENOTSUP); 2219 } 2220 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_AUXPWR) == 0) 2221 printf("%s: no auxiliary power, cannot do WOL from D3 " 2222 "(power-off) state\n", sc->sc_dev.dv_xname); 2223 } 2224 2225 /* Temporarily enable write to configuration registers. */ 2226 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2227 2228 for (i = 0; i < nitems(re_wolcfg); i++) { 2229 val = CSR_READ_1(sc, re_wolcfg[i].reg); 2230 if (enable && re_wolcfg[i].enable) 2231 val |= re_wolcfg[i].bit; 2232 else 2233 val &= ~re_wolcfg[i].bit; 2234 CSR_WRITE_1(sc, re_wolcfg[i].reg, val); 2235 } 2236 2237 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2238 2239 return (0); 2240 } 2241 #endif 2242