1 /* $NetBSD: rtl8169.c,v 1.154 2018/06/26 06:48:00 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.154 2018/06/26 06:48:00 msaitoh Exp $"); 37 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */ 38 39 /* 40 * RealTek 8139C+/8169/8169S/8168/8110S PCI NIC driver 41 * 42 * Written by Bill Paul <wpaul@windriver.com> 43 * Senior Networking Software Engineer 44 * Wind River Systems 45 */ 46 47 /* 48 * This driver is designed to support RealTek's next generation of 49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 50 * six devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 51 * RTL8110S, the RTL8168 and the RTL8111. 52 * 53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 54 * with the older 8139 family, however it also supports a special 55 * C+ mode of operation that provides several new performance enhancing 56 * features. These include: 57 * 58 * o Descriptor based DMA mechanism. Each descriptor represents 59 * a single packet fragment. Data buffers may be aligned on 60 * any byte boundary. 61 * 62 * o 64-bit DMA 63 * 64 * o TCP/IP checksum offload for both RX and TX 65 * 66 * o High and normal priority transmit DMA rings 67 * 68 * o VLAN tag insertion and extraction 69 * 70 * o TCP large send (segmentation offload) 71 * 72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 73 * programming API is fairly straightforward. The RX filtering, EEPROM 74 * access and PHY access is the same as it is on the older 8139 series 75 * chips. 76 * 77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. 
It has almost the 78 * same programming API and feature set as the 8139C+ with the following 79 * differences and additions: 80 * 81 * o 1000Mbps mode 82 * 83 * o Jumbo frames 84 * 85 * o GMII and TBI ports/registers for interfacing with copper 86 * or fiber PHYs 87 * 88 * o RX and TX DMA rings can have up to 1024 descriptors 89 * (the 8139C+ allows a maximum of 64) 90 * 91 * o Slight differences in register layout from the 8139C+ 92 * 93 * The TX start and timer interrupt registers are at different locations 94 * on the 8169 than they are on the 8139C+. Also, the status word in the 95 * RX descriptor has a slightly different bit layout. The 8169 does not 96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 97 * copper gigE PHY. 98 * 99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 100 * (the 'S' stands for 'single-chip'). These devices have the same 101 * programming API as the older 8169, but also have some vendor-specific 102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 104 * 105 * This driver takes advantage of the RX and TX checksum offload and 106 * VLAN tag insertion/extraction features. It also implements TX 107 * interrupt moderation using the timer interrupt registers, which 108 * significantly reduces TX interrupt load. There is also support 109 * for jumbo frames, however the 8169/8169S/8110S can not transmit 110 * jumbo frames larger than 7.5K, so the max MTU possible with this 111 * driver is 7500 bytes. 112 */ 113 114 115 #include <sys/param.h> 116 #include <sys/endian.h> 117 #include <sys/systm.h> 118 #include <sys/sockio.h> 119 #include <sys/mbuf.h> 120 #include <sys/malloc.h> 121 #include <sys/kernel.h> 122 #include <sys/socket.h> 123 #include <sys/device.h> 124 125 #include <net/if.h> 126 #include <net/if_arp.h> 127 #include <net/if_dl.h> 128 #include <net/if_ether.h> 129 #include <net/if_media.h> 130 #include <net/if_vlanvar.h> 131 132 #include <netinet/in_systm.h> /* XXX for IP_MAXPACKET */ 133 #include <netinet/in.h> /* XXX for IP_MAXPACKET */ 134 #include <netinet/ip.h> /* XXX for IP_MAXPACKET */ 135 136 #include <net/bpf.h> 137 #include <sys/rndsource.h> 138 139 #include <sys/bus.h> 140 141 #include <dev/mii/mii.h> 142 #include <dev/mii/miivar.h> 143 144 #include <dev/ic/rtl81x9reg.h> 145 #include <dev/ic/rtl81x9var.h> 146 147 #include <dev/ic/rtl8169var.h> 148 149 static inline void re_set_bufaddr(struct re_desc *, bus_addr_t); 150 151 static int re_newbuf(struct rtk_softc *, int, struct mbuf *); 152 static int re_rx_list_init(struct rtk_softc *); 153 static int re_tx_list_init(struct rtk_softc *); 154 static void re_rxeof(struct rtk_softc *); 155 static void re_txeof(struct rtk_softc *); 156 static void re_tick(void *); 157 static void re_start(struct ifnet *); 158 static int re_ioctl(struct ifnet *, u_long, void *); 159 static int re_init(struct ifnet *); 160 static void re_stop(struct ifnet *, int); 161 static void re_watchdog(struct ifnet *); 162 163 static int re_enable(struct rtk_softc *); 164 static void re_disable(struct rtk_softc *); 165 166 static int re_gmii_readreg(device_t, int, int); 167 static void re_gmii_writereg(device_t, int, int, int); 168 169 static int re_miibus_readreg(device_t, int, int); 170 static void re_miibus_writereg(device_t, int, int, int); 171 static void re_miibus_statchg(struct ifnet *); 172 173 static void re_reset(struct rtk_softc *); 174 175 static inline void 176 
re_set_bufaddr(struct re_desc *d, bus_addr_t addr) 177 { 178 179 d->re_bufaddr_lo = htole32((uint32_t)addr); 180 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 181 d->re_bufaddr_hi = htole32((uint64_t)addr >> 32); 182 else 183 d->re_bufaddr_hi = 0; 184 } 185 186 static int 187 re_gmii_readreg(device_t dev, int phy, int reg) 188 { 189 struct rtk_softc *sc = device_private(dev); 190 uint32_t rval; 191 int i; 192 193 if (phy != 7) 194 return 0; 195 196 /* Let the rgephy driver read the GMEDIASTAT register */ 197 198 if (reg == RTK_GMEDIASTAT) { 199 rval = CSR_READ_1(sc, RTK_GMEDIASTAT); 200 return rval; 201 } 202 203 CSR_WRITE_4(sc, RTK_PHYAR, reg << 16); 204 DELAY(1000); 205 206 for (i = 0; i < RTK_TIMEOUT; i++) { 207 rval = CSR_READ_4(sc, RTK_PHYAR); 208 if (rval & RTK_PHYAR_BUSY) 209 break; 210 DELAY(100); 211 } 212 213 if (i == RTK_TIMEOUT) { 214 printf("%s: PHY read failed\n", device_xname(sc->sc_dev)); 215 return 0; 216 } 217 218 return rval & RTK_PHYAR_PHYDATA; 219 } 220 221 static void 222 re_gmii_writereg(device_t dev, int phy, int reg, int data) 223 { 224 struct rtk_softc *sc = device_private(dev); 225 uint32_t rval; 226 int i; 227 228 CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) | 229 (data & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY); 230 DELAY(1000); 231 232 for (i = 0; i < RTK_TIMEOUT; i++) { 233 rval = CSR_READ_4(sc, RTK_PHYAR); 234 if (!(rval & RTK_PHYAR_BUSY)) 235 break; 236 DELAY(100); 237 } 238 239 if (i == RTK_TIMEOUT) { 240 printf("%s: PHY write reg %x <- %x failed\n", 241 device_xname(sc->sc_dev), reg, data); 242 } 243 } 244 245 static int 246 re_miibus_readreg(device_t dev, int phy, int reg) 247 { 248 struct rtk_softc *sc = device_private(dev); 249 uint16_t rval = 0; 250 uint16_t re8139_reg = 0; 251 int s; 252 253 s = splnet(); 254 255 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 256 rval = re_gmii_readreg(dev, phy, reg); 257 splx(s); 258 return rval; 259 } 260 261 /* Pretend the internal PHY is only at address 0 */ 262 if (phy) { 263 splx(s); 264 return 0; 265 } 266 switch (reg) { 267 case MII_BMCR: 268 re8139_reg = RTK_BMCR; 269 break; 270 case MII_BMSR: 271 re8139_reg = RTK_BMSR; 272 break; 273 case MII_ANAR: 274 re8139_reg = RTK_ANAR; 275 break; 276 case MII_ANER: 277 re8139_reg = RTK_ANER; 278 break; 279 case MII_ANLPAR: 280 re8139_reg = RTK_LPAR; 281 break; 282 case MII_PHYIDR1: 283 case MII_PHYIDR2: 284 splx(s); 285 return 0; 286 /* 287 * Allow the rlphy driver to read the media status 288 * register. If we have a link partner which does not 289 * support NWAY, this is the register which will tell 290 * us the results of parallel detection. 291 */ 292 case RTK_MEDIASTAT: 293 rval = CSR_READ_1(sc, RTK_MEDIASTAT); 294 splx(s); 295 return rval; 296 default: 297 printf("%s: bad phy register\n", device_xname(sc->sc_dev)); 298 splx(s); 299 return 0; 300 } 301 rval = CSR_READ_2(sc, re8139_reg); 302 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) { 303 /* 8139C+ has different bit layout. 
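 * (On the C+ chip the loopback and isolate bits apparently live at
 * different positions, so clear BMCR_LOOP and BMCR_ISO here rather
 * than report stale values to the MII layer; re_miibus_writereg()
 * masks the same two bits on the write side.)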
*/ 304 rval &= ~(BMCR_LOOP | BMCR_ISO); 305 } 306 splx(s); 307 return rval; 308 } 309 310 static void 311 re_miibus_writereg(device_t dev, int phy, int reg, int data) 312 { 313 struct rtk_softc *sc = device_private(dev); 314 uint16_t re8139_reg = 0; 315 int s; 316 317 s = splnet(); 318 319 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 320 re_gmii_writereg(dev, phy, reg, data); 321 splx(s); 322 return; 323 } 324 325 /* Pretend the internal PHY is only at address 0 */ 326 if (phy) { 327 splx(s); 328 return; 329 } 330 switch (reg) { 331 case MII_BMCR: 332 re8139_reg = RTK_BMCR; 333 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) { 334 /* 8139C+ has different bit layout. */ 335 data &= ~(BMCR_LOOP | BMCR_ISO); 336 } 337 break; 338 case MII_BMSR: 339 re8139_reg = RTK_BMSR; 340 break; 341 case MII_ANAR: 342 re8139_reg = RTK_ANAR; 343 break; 344 case MII_ANER: 345 re8139_reg = RTK_ANER; 346 break; 347 case MII_ANLPAR: 348 re8139_reg = RTK_LPAR; 349 break; 350 case MII_PHYIDR1: 351 case MII_PHYIDR2: 352 splx(s); 353 return; 354 break; 355 default: 356 printf("%s: bad phy register\n", device_xname(sc->sc_dev)); 357 splx(s); 358 return; 359 } 360 CSR_WRITE_2(sc, re8139_reg, data); 361 splx(s); 362 return; 363 } 364 365 static void 366 re_miibus_statchg(struct ifnet *ifp) 367 { 368 369 return; 370 } 371 372 static void 373 re_reset(struct rtk_softc *sc) 374 { 375 int i; 376 377 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET); 378 379 for (i = 0; i < RTK_TIMEOUT; i++) { 380 DELAY(10); 381 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0) 382 break; 383 } 384 if (i == RTK_TIMEOUT) 385 printf("%s: reset never completed!\n", 386 device_xname(sc->sc_dev)); 387 388 /* 389 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3, 390 * but also says "Rtl8169s sigle chip detected". 391 */ 392 if ((sc->sc_quirk & RTKQ_MACLDPS) != 0) 393 CSR_WRITE_1(sc, RTK_LDPS, 1); 394 395 } 396 397 /* 398 * The following routine is designed to test for a defect on some 399 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 400 * lines connected to the bus, however for a 32-bit only card, they 401 * should be pulled high. The result of this defect is that the 402 * NIC will not work right if you plug it into a 64-bit slot: DMA 403 * operations will be done with 64-bit transfers, which will fail 404 * because the 64-bit data lines aren't connected. 405 * 406 * There's no way to work around this (short of taking a soldering 407 * iron to the board), however we can detect it. The method we use 408 * here is to put the NIC into digital loopback mode, set the receiver 409 * to promiscuous mode, and then try to send a frame. We then compare 410 * the frame data we sent to what was received. If the data matches, 411 * then the NIC is working correctly, otherwise we know the user has 412 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 413 * slot. In the latter case, there's no way the NIC can work correctly, 414 * so we print out a message on the console and abort the device attach.
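 *
 * As a rough illustration only (this is not the actual attach code and
 * the call site is hypothetical), a bus front-end that knows it has one
 * of the affected 8169 revisions might run the check right after
 * re_attach(), along these lines:
 *
 *	if ((sc->sc_quirk & RTKQ_8169NONS) != 0 && re_diag(sc) != 0) {
 *		aprint_error_dev(sc->sc_dev,
 *		    "diagnostic failed, aborting attach\n");
 *		return;
 *	}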
415 */ 416 417 int 418 re_diag(struct rtk_softc *sc) 419 { 420 struct ifnet *ifp = &sc->ethercom.ec_if; 421 struct mbuf *m0; 422 struct ether_header *eh; 423 struct re_rxsoft *rxs; 424 struct re_desc *cur_rx; 425 bus_dmamap_t dmamap; 426 uint16_t status; 427 uint32_t rxstat; 428 int total_len, i, s, error = 0; 429 static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 430 static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 431 432 /* Allocate a single mbuf */ 433 434 MGETHDR(m0, M_DONTWAIT, MT_DATA); 435 if (m0 == NULL) 436 return ENOBUFS; 437 438 /* 439 * Initialize the NIC in test mode. This sets the chip up 440 * so that it can send and receive frames, but performs the 441 * following special functions: 442 * - Puts receiver in promiscuous mode 443 * - Enables digital loopback mode 444 * - Leaves interrupts turned off 445 */ 446 447 ifp->if_flags |= IFF_PROMISC; 448 sc->re_testmode = 1; 449 re_init(ifp); 450 re_stop(ifp, 0); 451 DELAY(100000); 452 re_init(ifp); 453 454 /* Put some data in the mbuf */ 455 456 eh = mtod(m0, struct ether_header *); 457 memcpy(eh->ether_dhost, &dst, ETHER_ADDR_LEN); 458 memcpy(eh->ether_shost, &src, ETHER_ADDR_LEN); 459 eh->ether_type = htons(ETHERTYPE_IP); 460 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 461 462 /* 463 * Queue the packet, start transmission. 464 */ 465 466 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF); 467 s = splnet(); 468 IF_ENQUEUE(&ifp->if_snd, m0); 469 re_start(ifp); 470 splx(s); 471 m0 = NULL; 472 473 /* Wait for it to propagate through the chip */ 474 475 DELAY(100000); 476 for (i = 0; i < RTK_TIMEOUT; i++) { 477 status = CSR_READ_2(sc, RTK_ISR); 478 if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) == 479 (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) 480 break; 481 DELAY(10); 482 } 483 if (i == RTK_TIMEOUT) { 484 aprint_error_dev(sc->sc_dev, 485 "diagnostic failed, failed to receive packet " 486 "in loopback mode\n"); 487 error = EIO; 488 goto done; 489 } 490 491 /* 492 * The packet should have been dumped into the first 493 * entry in the RX DMA ring. Grab it from there. 494 */ 495 496 rxs = &sc->re_ldata.re_rxsoft[0]; 497 dmamap = rxs->rxs_dmamap; 498 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 499 BUS_DMASYNC_POSTREAD); 500 bus_dmamap_unload(sc->sc_dmat, dmamap); 501 502 m0 = rxs->rxs_mbuf; 503 rxs->rxs_mbuf = NULL; 504 eh = mtod(m0, struct ether_header *); 505 506 RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 507 cur_rx = &sc->re_ldata.re_rx_list[0]; 508 rxstat = le32toh(cur_rx->re_cmdstat); 509 total_len = rxstat & sc->re_rxlenmask; 510 511 if (total_len != ETHER_MIN_LEN) { 512 aprint_error_dev(sc->sc_dev, 513 "diagnostic failed, received short packet\n"); 514 error = EIO; 515 goto done; 516 } 517 518 /* Test that the received packet data matches what we sent. 
*/ 519 520 if (memcmp(&eh->ether_dhost, &dst, ETHER_ADDR_LEN) || 521 memcmp(&eh->ether_shost, &src, ETHER_ADDR_LEN) || 522 ntohs(eh->ether_type) != ETHERTYPE_IP) { 523 aprint_error_dev(sc->sc_dev, "WARNING, DMA FAILURE!\n" 524 "expected TX data: %s/%s/0x%x\n" 525 "received RX data: %s/%s/0x%x\n" 526 "You may have a defective 32-bit NIC plugged " 527 "into a 64-bit PCI slot.\n" 528 "Please re-install the NIC in a 32-bit slot " 529 "for proper operation.\n" 530 "Read the re(4) man page for more details.\n" , 531 ether_sprintf(dst), ether_sprintf(src), ETHERTYPE_IP, 532 ether_sprintf(eh->ether_dhost), 533 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); 534 error = EIO; 535 } 536 537 done: 538 /* Turn interface off, release resources */ 539 540 sc->re_testmode = 0; 541 ifp->if_flags &= ~IFF_PROMISC; 542 re_stop(ifp, 0); 543 if (m0 != NULL) 544 m_freem(m0); 545 546 return error; 547 } 548 549 550 /* 551 * Attach the interface. Allocate softc structures, do ifmedia 552 * setup and ethernet/BPF attach. 553 */ 554 void 555 re_attach(struct rtk_softc *sc) 556 { 557 uint8_t eaddr[ETHER_ADDR_LEN]; 558 struct ifnet *ifp; 559 int error = 0, i; 560 561 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 562 uint32_t hwrev; 563 564 /* Revision of 8169/8169S/8110s in bits 30..26, 23 */ 565 hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV; 566 switch (hwrev) { 567 case RTK_HWREV_8169: 568 sc->sc_quirk |= RTKQ_8169NONS; 569 break; 570 case RTK_HWREV_8169S: 571 case RTK_HWREV_8110S: 572 case RTK_HWREV_8169_8110SB: 573 case RTK_HWREV_8169_8110SBL: 574 case RTK_HWREV_8169_8110SC: 575 sc->sc_quirk |= RTKQ_MACLDPS; 576 break; 577 case RTK_HWREV_8168_SPIN1: 578 case RTK_HWREV_8168_SPIN2: 579 case RTK_HWREV_8168_SPIN3: 580 sc->sc_quirk |= RTKQ_MACSTAT; 581 break; 582 case RTK_HWREV_8168C: 583 case RTK_HWREV_8168C_SPIN2: 584 case RTK_HWREV_8168CP: 585 case RTK_HWREV_8168D: 586 case RTK_HWREV_8168DP: 587 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 588 RTKQ_MACSTAT | RTKQ_CMDSTOP; 589 /* 590 * From FreeBSD driver: 591 * 592 * These (8168/8111) controllers support jumbo frame 593 * but it seems that enabling it requires touching 594 * additional magic registers. Depending on MAC 595 * revisions some controllers need to disable 596 * checksum offload. So disable jumbo frame until 597 * I have better idea what it really requires to 598 * make it support. 599 * RTL8168C/CP : supports up to 6KB jumbo frame. 600 * RTL8111C/CP : supports up to 9KB jumbo frame. 
601 */ 602 sc->sc_quirk |= RTKQ_NOJUMBO; 603 break; 604 case RTK_HWREV_8168E: 605 case RTK_HWREV_8168H: 606 case RTK_HWREV_8168H_SPIN1: 607 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 608 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM | 609 RTKQ_NOJUMBO; 610 break; 611 case RTK_HWREV_8168E_VL: 612 case RTK_HWREV_8168F: 613 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 614 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO; 615 break; 616 case RTK_HWREV_8168G: 617 case RTK_HWREV_8168G_SPIN1: 618 case RTK_HWREV_8168G_SPIN2: 619 case RTK_HWREV_8168G_SPIN4: 620 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 621 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO | 622 RTKQ_RXDV_GATED; 623 break; 624 case RTK_HWREV_8100E: 625 case RTK_HWREV_8100E_SPIN2: 626 case RTK_HWREV_8101E: 627 sc->sc_quirk |= RTKQ_NOJUMBO; 628 break; 629 case RTK_HWREV_8102E: 630 case RTK_HWREV_8102EL: 631 case RTK_HWREV_8103E: 632 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD | 633 RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO; 634 break; 635 default: 636 aprint_normal_dev(sc->sc_dev, 637 "Unknown revision (0x%08x)\n", hwrev); 638 /* assume the latest features */ 639 sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD; 640 sc->sc_quirk |= RTKQ_NOJUMBO; 641 } 642 643 /* Set RX length mask */ 644 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN; 645 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169; 646 } else { 647 sc->sc_quirk |= RTKQ_NOJUMBO; 648 649 /* Set RX length mask */ 650 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN; 651 sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139; 652 } 653 654 /* Reset the adapter. */ 655 re_reset(sc); 656 657 /* 658 * RTL81x9 chips automatically read the EEPROM to initialize the MAC 659 * address, and some NAS boxes override that address with their own 660 * configuration, so there is no need to explicitly read the EEPROM and set the ID registers here. 661 */ 662 #ifdef RE_USE_EECMD 663 if ((sc->sc_quirk & RTKQ_NOEECMD) != 0) { 664 /* 665 * Get station address from ID registers. 666 */ 667 for (i = 0; i < ETHER_ADDR_LEN; i++) 668 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i); 669 } else { 670 uint16_t val; 671 int addr_len; 672 673 /* 674 * Get station address from the EEPROM. 675 */ 676 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129) 677 addr_len = RTK_EEADDR_LEN1; 678 else 679 addr_len = RTK_EEADDR_LEN0; 680 681 /* 682 * Get station address from the EEPROM. 683 */ 684 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) { 685 val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len); 686 eaddr[(i * 2) + 0] = val & 0xff; 687 eaddr[(i * 2) + 1] = val >> 8; 688 } 689 } 690 #else 691 /* 692 * Get station address from ID registers. 693 */ 694 for (i = 0; i < ETHER_ADDR_LEN; i++) 695 eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i); 696 #endif 697 698 /* Take PHY out of power down mode.
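 * (On the RTKQ_PHYWAKE_PM parts, bit 7 of the PMCH register appears to
 * gate PHY power; the read-modify-write below sets that bit while
 * preserving the rest of the register.)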
*/ 699 if ((sc->sc_quirk & RTKQ_PHYWAKE_PM) != 0) 700 CSR_WRITE_1(sc, RTK_PMCH, CSR_READ_1(sc, RTK_PMCH) | 0x80); 701 702 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 703 ether_sprintf(eaddr)); 704 705 if (sc->re_ldata.re_tx_desc_cnt > 706 PAGE_SIZE / sizeof(struct re_desc)) { 707 sc->re_ldata.re_tx_desc_cnt = 708 PAGE_SIZE / sizeof(struct re_desc); 709 } 710 711 aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n", 712 sc->re_ldata.re_tx_desc_cnt); 713 KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0); 714 715 /* Allocate DMA'able memory for the TX ring */ 716 if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc), 717 RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1, 718 &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) { 719 aprint_error_dev(sc->sc_dev, 720 "can't allocate tx listseg, error = %d\n", error); 721 goto fail_0; 722 } 723 724 /* Load the map for the TX ring. */ 725 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg, 726 sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc), 727 (void **)&sc->re_ldata.re_tx_list, 728 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 729 aprint_error_dev(sc->sc_dev, 730 "can't map tx list, error = %d\n", error); 731 goto fail_1; 732 } 733 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc)); 734 735 if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1, 736 RE_TX_LIST_SZ(sc), 0, 0, 737 &sc->re_ldata.re_tx_list_map)) != 0) { 738 aprint_error_dev(sc->sc_dev, 739 "can't create tx list map, error = %d\n", error); 740 goto fail_2; 741 } 742 743 744 if ((error = bus_dmamap_load(sc->sc_dmat, 745 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list, 746 RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 747 aprint_error_dev(sc->sc_dev, 748 "can't load tx list, error = %d\n", error); 749 goto fail_3; 750 } 751 752 /* Create DMA maps for TX buffers */ 753 for (i = 0; i < RE_TX_QLEN; i++) { 754 error = bus_dmamap_create(sc->sc_dmat, 755 round_page(IP_MAXPACKET), 756 RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN, 757 0, 0, &sc->re_ldata.re_txq[i].txq_dmamap); 758 if (error) { 759 aprint_error_dev(sc->sc_dev, 760 "can't create DMA map for TX\n"); 761 goto fail_4; 762 } 763 } 764 765 /* Allocate DMA'able memory for the RX ring */ 766 /* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */ 767 if ((error = bus_dmamem_alloc(sc->sc_dmat, 768 RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1, 769 &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) { 770 aprint_error_dev(sc->sc_dev, 771 "can't allocate rx listseg, error = %d\n", error); 772 goto fail_4; 773 } 774 775 /* Load the map for the RX ring. 
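 * (As with the TX ring above, bus_dmamem_map() below only makes the
 * allocated segments visible in kernel virtual address space; the DMA
 * map itself is created and loaded in the following steps.)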
*/ 776 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg, 777 sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ, 778 (void **)&sc->re_ldata.re_rx_list, 779 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 780 aprint_error_dev(sc->sc_dev, 781 "can't map rx list, error = %d\n", error); 782 goto fail_5; 783 } 784 memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ); 785 786 if ((error = bus_dmamap_create(sc->sc_dmat, 787 RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0, 788 &sc->re_ldata.re_rx_list_map)) != 0) { 789 aprint_error_dev(sc->sc_dev, 790 "can't create rx list map, error = %d\n", error); 791 goto fail_6; 792 } 793 794 if ((error = bus_dmamap_load(sc->sc_dmat, 795 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list, 796 RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) { 797 aprint_error_dev(sc->sc_dev, 798 "can't load rx list, error = %d\n", error); 799 goto fail_7; 800 } 801 802 /* Create DMA maps for RX buffers */ 803 for (i = 0; i < RE_RX_DESC_CNT; i++) { 804 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 805 0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap); 806 if (error) { 807 aprint_error_dev(sc->sc_dev, 808 "can't create DMA map for RX\n"); 809 goto fail_8; 810 } 811 } 812 813 /* 814 * Record interface as attached. From here, we should not fail. 815 */ 816 sc->sc_flags |= RTK_ATTACHED; 817 818 ifp = &sc->ethercom.ec_if; 819 ifp->if_softc = sc; 820 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 821 ifp->if_mtu = ETHERMTU; 822 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 823 ifp->if_ioctl = re_ioctl; 824 sc->ethercom.ec_capabilities |= 825 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 826 ifp->if_start = re_start; 827 ifp->if_stop = re_stop; 828 829 /* 830 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets, 831 * so we have a workaround to handle the bug by padding 832 * such packets manually. 833 */ 834 ifp->if_capabilities |= 835 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 836 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 837 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 838 IFCAP_TSOv4; 839 840 ifp->if_watchdog = re_watchdog; 841 ifp->if_init = re_init; 842 ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN; 843 ifp->if_capenable = ifp->if_capabilities; 844 IFQ_SET_READY(&ifp->if_snd); 845 846 callout_init(&sc->rtk_tick_ch, 0); 847 848 /* Do MII setup */ 849 sc->mii.mii_ifp = ifp; 850 sc->mii.mii_readreg = re_miibus_readreg; 851 sc->mii.mii_writereg = re_miibus_writereg; 852 sc->mii.mii_statchg = re_miibus_statchg; 853 sc->ethercom.ec_mii = &sc->mii; 854 ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange, 855 ether_mediastatus); 856 mii_attach(sc->sc_dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 857 MII_OFFSET_ANY, 0); 858 ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO); 859 860 /* 861 * Call MI attach routine. 862 */ 863 if_attach(ifp); 864 if_deferred_start_init(ifp, NULL); 865 ether_ifattach(ifp, eaddr); 866 867 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 868 RND_TYPE_NET, RND_FLAG_DEFAULT); 869 870 if (pmf_device_register(sc->sc_dev, NULL, NULL)) 871 pmf_class_network_register(sc->sc_dev, ifp); 872 else 873 aprint_error_dev(sc->sc_dev, 874 "couldn't establish power handler\n"); 875 876 return; 877 878 fail_8: 879 /* Destroy DMA maps for RX buffers. */ 880 for (i = 0; i < RE_RX_DESC_CNT; i++) 881 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL) 882 bus_dmamap_destroy(sc->sc_dmat, 883 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 884 885 /* Free DMA'able memory for the RX ring. 
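 * (The attach error path unwinds the RX ring setup in reverse order:
 * unload the list map, destroy it, unmap the ring, then free the DMA
 * segments.)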
*/ 886 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 887 fail_7: 888 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 889 fail_6: 890 bus_dmamem_unmap(sc->sc_dmat, 891 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ); 892 fail_5: 893 bus_dmamem_free(sc->sc_dmat, 894 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg); 895 896 fail_4: 897 /* Destroy DMA maps for TX buffers. */ 898 for (i = 0; i < RE_TX_QLEN; i++) 899 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL) 900 bus_dmamap_destroy(sc->sc_dmat, 901 sc->re_ldata.re_txq[i].txq_dmamap); 902 903 /* Free DMA'able memory for the TX ring. */ 904 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 905 fail_3: 906 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 907 fail_2: 908 bus_dmamem_unmap(sc->sc_dmat, 909 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 910 fail_1: 911 bus_dmamem_free(sc->sc_dmat, 912 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg); 913 fail_0: 914 return; 915 } 916 917 918 /* 919 * re_activate: 920 * Handle device activation/deactivation requests. 921 */ 922 int 923 re_activate(device_t self, enum devact act) 924 { 925 struct rtk_softc *sc = device_private(self); 926 927 switch (act) { 928 case DVACT_DEACTIVATE: 929 if_deactivate(&sc->ethercom.ec_if); 930 return 0; 931 default: 932 return EOPNOTSUPP; 933 } 934 } 935 936 /* 937 * re_detach: 938 * Detach a rtk interface. 939 */ 940 int 941 re_detach(struct rtk_softc *sc) 942 { 943 struct ifnet *ifp = &sc->ethercom.ec_if; 944 int i; 945 946 /* 947 * Succeed now if there isn't any work to do. 948 */ 949 if ((sc->sc_flags & RTK_ATTACHED) == 0) 950 return 0; 951 952 /* Unhook our tick handler. */ 953 callout_stop(&sc->rtk_tick_ch); 954 955 /* Detach all PHYs. */ 956 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY); 957 958 /* Delete all remaining media. */ 959 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY); 960 961 rnd_detach_source(&sc->rnd_source); 962 ether_ifdetach(ifp); 963 if_detach(ifp); 964 965 /* Destroy DMA maps for RX buffers. */ 966 for (i = 0; i < RE_RX_DESC_CNT; i++) 967 if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL) 968 bus_dmamap_destroy(sc->sc_dmat, 969 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 970 971 /* Free DMA'able memory for the RX ring. */ 972 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 973 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map); 974 bus_dmamem_unmap(sc->sc_dmat, 975 (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ); 976 bus_dmamem_free(sc->sc_dmat, 977 &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg); 978 979 /* Destroy DMA maps for TX buffers. */ 980 for (i = 0; i < RE_TX_QLEN; i++) 981 if (sc->re_ldata.re_txq[i].txq_dmamap != NULL) 982 bus_dmamap_destroy(sc->sc_dmat, 983 sc->re_ldata.re_txq[i].txq_dmamap); 984 985 /* Free DMA'able memory for the TX ring. */ 986 bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 987 bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map); 988 bus_dmamem_unmap(sc->sc_dmat, 989 (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc)); 990 bus_dmamem_free(sc->sc_dmat, 991 &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg); 992 993 pmf_device_deregister(sc->sc_dev); 994 995 /* we don't want to run again */ 996 sc->sc_flags &= ~RTK_ATTACHED; 997 998 return 0; 999 } 1000 1001 /* 1002 * re_enable: 1003 * Enable the RTL81X9 chip. 
1004 */ 1005 static int 1006 re_enable(struct rtk_softc *sc) 1007 { 1008 1009 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) { 1010 if ((*sc->sc_enable)(sc) != 0) { 1011 printf("%s: device enable failed\n", 1012 device_xname(sc->sc_dev)); 1013 return EIO; 1014 } 1015 sc->sc_flags |= RTK_ENABLED; 1016 } 1017 return 0; 1018 } 1019 1020 /* 1021 * re_disable: 1022 * Disable the RTL81X9 chip. 1023 */ 1024 static void 1025 re_disable(struct rtk_softc *sc) 1026 { 1027 1028 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) { 1029 (*sc->sc_disable)(sc); 1030 sc->sc_flags &= ~RTK_ENABLED; 1031 } 1032 } 1033 1034 static int 1035 re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m) 1036 { 1037 struct mbuf *n = NULL; 1038 bus_dmamap_t map; 1039 struct re_desc *d; 1040 struct re_rxsoft *rxs; 1041 uint32_t cmdstat; 1042 int error; 1043 1044 if (m == NULL) { 1045 MGETHDR(n, M_DONTWAIT, MT_DATA); 1046 if (n == NULL) 1047 return ENOBUFS; 1048 1049 MCLGET(n, M_DONTWAIT); 1050 if ((n->m_flags & M_EXT) == 0) { 1051 m_freem(n); 1052 return ENOBUFS; 1053 } 1054 m = n; 1055 } else 1056 m->m_data = m->m_ext.ext_buf; 1057 1058 /* 1059 * Initialize mbuf length fields and fixup 1060 * alignment so that the frame payload is 1061 * longword aligned. 1062 */ 1063 m->m_len = m->m_pkthdr.len = MCLBYTES - RE_ETHER_ALIGN; 1064 m->m_data += RE_ETHER_ALIGN; 1065 1066 rxs = &sc->re_ldata.re_rxsoft[idx]; 1067 map = rxs->rxs_dmamap; 1068 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1069 BUS_DMA_READ|BUS_DMA_NOWAIT); 1070 1071 if (error) 1072 goto out; 1073 1074 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1075 BUS_DMASYNC_PREREAD); 1076 1077 d = &sc->re_ldata.re_rx_list[idx]; 1078 #ifdef DIAGNOSTIC 1079 RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1080 cmdstat = le32toh(d->re_cmdstat); 1081 RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1082 if (cmdstat & RE_RDESC_STAT_OWN) { 1083 panic("%s: tried to map busy RX descriptor", 1084 device_xname(sc->sc_dev)); 1085 } 1086 #endif 1087 1088 rxs->rxs_mbuf = m; 1089 1090 d->re_vlanctl = 0; 1091 cmdstat = map->dm_segs[0].ds_len; 1092 if (idx == (RE_RX_DESC_CNT - 1)) 1093 cmdstat |= RE_RDESC_CMD_EOR; 1094 re_set_bufaddr(d, map->dm_segs[0].ds_addr); 1095 d->re_cmdstat = htole32(cmdstat); 1096 RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1097 cmdstat |= RE_RDESC_CMD_OWN; 1098 d->re_cmdstat = htole32(cmdstat); 1099 RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1100 1101 return 0; 1102 out: 1103 if (n != NULL) 1104 m_freem(n); 1105 return ENOMEM; 1106 } 1107 1108 static int 1109 re_tx_list_init(struct rtk_softc *sc) 1110 { 1111 int i; 1112 1113 memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc)); 1114 for (i = 0; i < RE_TX_QLEN; i++) { 1115 sc->re_ldata.re_txq[i].txq_mbuf = NULL; 1116 } 1117 1118 bus_dmamap_sync(sc->sc_dmat, 1119 sc->re_ldata.re_tx_list_map, 0, 1120 sc->re_ldata.re_tx_list_map->dm_mapsize, 1121 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1122 sc->re_ldata.re_txq_prodidx = 0; 1123 sc->re_ldata.re_txq_considx = 0; 1124 sc->re_ldata.re_txq_free = RE_TX_QLEN; 1125 sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc); 1126 sc->re_ldata.re_tx_nextfree = 0; 1127 1128 return 0; 1129 } 1130 1131 static int 1132 re_rx_list_init(struct rtk_softc *sc) 1133 { 1134 int i; 1135 1136 memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ); 1137 1138 for (i = 0; i < RE_RX_DESC_CNT; i++) { 1139 if (re_newbuf(sc, i, NULL) == ENOBUFS) 1140 return ENOBUFS; 1141 } 1142 1143 sc->re_ldata.re_rx_prodidx = 0; 1144 sc->re_head = 
sc->re_tail = NULL; 1145 1146 return 0; 1147 } 1148 1149 /* 1150 * RX handler for C+ and 8169. For the gigE chips, we support 1151 * the reception of jumbo frames that have been fragmented 1152 * across multiple 2K mbuf cluster buffers. 1153 */ 1154 static void 1155 re_rxeof(struct rtk_softc *sc) 1156 { 1157 struct mbuf *m; 1158 struct ifnet *ifp; 1159 int i, total_len; 1160 struct re_desc *cur_rx; 1161 struct re_rxsoft *rxs; 1162 uint32_t rxstat, rxvlan; 1163 1164 ifp = &sc->ethercom.ec_if; 1165 1166 for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) { 1167 cur_rx = &sc->re_ldata.re_rx_list[i]; 1168 RE_RXDESCSYNC(sc, i, 1169 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1170 rxstat = le32toh(cur_rx->re_cmdstat); 1171 rxvlan = le32toh(cur_rx->re_vlanctl); 1172 RE_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD); 1173 if ((rxstat & RE_RDESC_STAT_OWN) != 0) { 1174 break; 1175 } 1176 total_len = rxstat & sc->re_rxlenmask; 1177 rxs = &sc->re_ldata.re_rxsoft[i]; 1178 m = rxs->rxs_mbuf; 1179 1180 /* Invalidate the RX mbuf and unload its map */ 1181 1182 bus_dmamap_sync(sc->sc_dmat, 1183 rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize, 1184 BUS_DMASYNC_POSTREAD); 1185 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1186 1187 if ((rxstat & RE_RDESC_STAT_EOF) == 0) { 1188 m->m_len = MCLBYTES - RE_ETHER_ALIGN; 1189 if (sc->re_head == NULL) 1190 sc->re_head = sc->re_tail = m; 1191 else { 1192 m_remove_pkthdr(m); 1193 sc->re_tail->m_next = m; 1194 sc->re_tail = m; 1195 } 1196 re_newbuf(sc, i, NULL); 1197 continue; 1198 } 1199 1200 /* 1201 * NOTE: for the 8139C+, the frame length field 1202 * is always 12 bits in size, but for the gigE chips, 1203 * it is 13 bits (since the max RX frame length is 16K). 1204 * Unfortunately, all 32 bits in the status word 1205 * were already used, so to make room for the extra 1206 * length bit, RealTek took out the 'frame alignment 1207 * error' bit and shifted the other status bits 1208 * over one slot. The OWN, EOR, FS and LS bits are 1209 * still in the same places. We have already extracted 1210 * the frame length and checked the OWN bit, so rather 1211 * than using an alternate bit mapping, we shift the 1212 * status bits one space to the right so we can evaluate 1213 * them using the 8169 status as though it was in the 1214 * same format as that of the 8139C+. 1215 */ 1216 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) 1217 rxstat >>= 1; 1218 1219 if (__predict_false((rxstat & RE_RDESC_STAT_RXERRSUM) != 0)) { 1220 #ifdef RE_DEBUG 1221 printf("%s: RX error (rxstat = 0x%08x)", 1222 device_xname(sc->sc_dev), rxstat); 1223 if (rxstat & RE_RDESC_STAT_FRALIGN) 1224 printf(", frame alignment error"); 1225 if (rxstat & RE_RDESC_STAT_BUFOFLOW) 1226 printf(", out of buffer space"); 1227 if (rxstat & RE_RDESC_STAT_FIFOOFLOW) 1228 printf(", FIFO overrun"); 1229 if (rxstat & RE_RDESC_STAT_GIANT) 1230 printf(", giant packet"); 1231 if (rxstat & RE_RDESC_STAT_RUNT) 1232 printf(", runt packet"); 1233 if (rxstat & RE_RDESC_STAT_CRCERR) 1234 printf(", CRC error"); 1235 printf("\n"); 1236 #endif 1237 ifp->if_ierrors++; 1238 /* 1239 * If this is part of a multi-fragment packet, 1240 * discard all the pieces. 1241 */ 1242 if (sc->re_head != NULL) { 1243 m_freem(sc->re_head); 1244 sc->re_head = sc->re_tail = NULL; 1245 } 1246 re_newbuf(sc, i, m); 1247 continue; 1248 } 1249 1250 /* 1251 * If allocating a replacement mbuf fails, 1252 * reload the current one. 
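 * (The incoming frame is simply dropped in that case; requeueing the
 * old mbuf keeps the RX ring fully populated.)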
1253 */ 1254 1255 if (__predict_false(re_newbuf(sc, i, NULL) != 0)) { 1256 ifp->if_ierrors++; 1257 if (sc->re_head != NULL) { 1258 m_freem(sc->re_head); 1259 sc->re_head = sc->re_tail = NULL; 1260 } 1261 re_newbuf(sc, i, m); 1262 continue; 1263 } 1264 1265 if (sc->re_head != NULL) { 1266 m->m_len = total_len % (MCLBYTES - RE_ETHER_ALIGN); 1267 /* 1268 * Special case: if there's 4 bytes or less 1269 * in this buffer, the mbuf can be discarded: 1270 * the last 4 bytes is the CRC, which we don't 1271 * care about anyway. 1272 */ 1273 if (m->m_len <= ETHER_CRC_LEN) { 1274 sc->re_tail->m_len -= 1275 (ETHER_CRC_LEN - m->m_len); 1276 m_freem(m); 1277 } else { 1278 m->m_len -= ETHER_CRC_LEN; 1279 m_remove_pkthdr(m); 1280 sc->re_tail->m_next = m; 1281 } 1282 m = sc->re_head; 1283 sc->re_head = sc->re_tail = NULL; 1284 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1285 } else 1286 m->m_pkthdr.len = m->m_len = 1287 (total_len - ETHER_CRC_LEN); 1288 1289 m_set_rcvif(m, ifp); 1290 1291 /* Do RX checksumming */ 1292 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) { 1293 /* Check IP header checksum */ 1294 if ((rxstat & RE_RDESC_STAT_PROTOID) != 0) { 1295 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1296 if (rxstat & RE_RDESC_STAT_IPSUMBAD) 1297 m->m_pkthdr.csum_flags |= 1298 M_CSUM_IPv4_BAD; 1299 1300 /* Check TCP/UDP checksum */ 1301 if (RE_TCPPKT(rxstat)) { 1302 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1303 if (rxstat & RE_RDESC_STAT_TCPSUMBAD) 1304 m->m_pkthdr.csum_flags |= 1305 M_CSUM_TCP_UDP_BAD; 1306 } else if (RE_UDPPKT(rxstat)) { 1307 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1308 if (rxstat & RE_RDESC_STAT_UDPSUMBAD) { 1309 /* 1310 * XXX: 8139C+ thinks UDP csum 1311 * 0xFFFF is bad, force software 1312 * calculation. 1313 */ 1314 if (sc->sc_quirk & RTKQ_8139CPLUS) 1315 m->m_pkthdr.csum_flags 1316 &= ~M_CSUM_UDPv4; 1317 else 1318 m->m_pkthdr.csum_flags 1319 |= M_CSUM_TCP_UDP_BAD; 1320 } 1321 } 1322 } 1323 } else { 1324 /* Check IPv4 header checksum */ 1325 if ((rxvlan & RE_RDESC_VLANCTL_IPV4) != 0) { 1326 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1327 if (rxstat & RE_RDESC_STAT_IPSUMBAD) 1328 m->m_pkthdr.csum_flags |= 1329 M_CSUM_IPv4_BAD; 1330 1331 /* Check TCPv4/UDPv4 checksum */ 1332 if (RE_TCPPKT(rxstat)) { 1333 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1334 if (rxstat & RE_RDESC_STAT_TCPSUMBAD) 1335 m->m_pkthdr.csum_flags |= 1336 M_CSUM_TCP_UDP_BAD; 1337 } else if (RE_UDPPKT(rxstat)) { 1338 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1339 if (rxstat & RE_RDESC_STAT_UDPSUMBAD) 1340 m->m_pkthdr.csum_flags |= 1341 M_CSUM_TCP_UDP_BAD; 1342 } 1343 } 1344 /* XXX Check TCPv6/UDPv6 checksum? 
*/ 1345 } 1346 1347 if (rxvlan & RE_RDESC_VLANCTL_TAG) { 1348 vlan_set_tag(m, 1349 bswap16(rxvlan & RE_RDESC_VLANCTL_DATA)); 1350 } 1351 if_percpuq_enqueue(ifp->if_percpuq, m); 1352 } 1353 1354 sc->re_ldata.re_rx_prodidx = i; 1355 } 1356 1357 static void 1358 re_txeof(struct rtk_softc *sc) 1359 { 1360 struct ifnet *ifp; 1361 struct re_txq *txq; 1362 uint32_t txstat; 1363 int idx, descidx; 1364 1365 ifp = &sc->ethercom.ec_if; 1366 1367 for (idx = sc->re_ldata.re_txq_considx; 1368 sc->re_ldata.re_txq_free < RE_TX_QLEN; 1369 idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) { 1370 txq = &sc->re_ldata.re_txq[idx]; 1371 KASSERT(txq->txq_mbuf != NULL); 1372 1373 descidx = txq->txq_descidx; 1374 RE_TXDESCSYNC(sc, descidx, 1375 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1376 txstat = 1377 le32toh(sc->re_ldata.re_tx_list[descidx].re_cmdstat); 1378 RE_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD); 1379 KASSERT((txstat & RE_TDESC_CMD_EOF) != 0); 1380 if (txstat & RE_TDESC_CMD_OWN) { 1381 break; 1382 } 1383 1384 sc->re_ldata.re_tx_free += txq->txq_nsegs; 1385 KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc)); 1386 bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 1387 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1388 bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap); 1389 m_freem(txq->txq_mbuf); 1390 txq->txq_mbuf = NULL; 1391 1392 if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT)) 1393 ifp->if_collisions++; 1394 if (txstat & RE_TDESC_STAT_TXERRSUM) 1395 ifp->if_oerrors++; 1396 else 1397 ifp->if_opackets++; 1398 } 1399 1400 sc->re_ldata.re_txq_considx = idx; 1401 1402 if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD) 1403 ifp->if_flags &= ~IFF_OACTIVE; 1404 1405 /* 1406 * If not all descriptors have been reaped yet, 1407 * reload the timer so that we will eventually get another 1408 * interrupt that will cause us to re-enter this routine. 1409 * This is done in case the transmitter has gone idle. 1410 */ 1411 if (sc->re_ldata.re_txq_free < RE_TX_QLEN) { 1412 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) 1413 CSR_WRITE_4(sc, RTK_TIMERCNT, 1); 1414 if ((sc->sc_quirk & RTKQ_PCIE) != 0) { 1415 /* 1416 * Some chips will ignore a second TX request 1417 * issued while an existing transmission is in 1418 * progress. If the transmitter goes idle but 1419 * there are still packets waiting to be sent, 1420 * we need to restart the channel here to flush 1421 * them out. This only seems to be required with 1422 * the PCIe devices. 1423 */ 1424 CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START); 1425 } 1426 } else 1427 ifp->if_timer = 0; 1428 } 1429 1430 static void 1431 re_tick(void *arg) 1432 { 1433 struct rtk_softc *sc = arg; 1434 int s; 1435 1436 /* XXX: just return for 8169S/8110S with rev 2 or newer phy */ 1437 s = splnet(); 1438 1439 mii_tick(&sc->mii); 1440 splx(s); 1441 1442 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc); 1443 } 1444 1445 int 1446 re_intr(void *arg) 1447 { 1448 struct rtk_softc *sc = arg; 1449 struct ifnet *ifp; 1450 uint16_t status; 1451 int handled = 0; 1452 1453 if (!device_has_power(sc->sc_dev)) 1454 return 0; 1455 1456 ifp = &sc->ethercom.ec_if; 1457 1458 if ((ifp->if_flags & IFF_UP) == 0) 1459 return 0; 1460 1461 const uint16_t status_mask = (sc->sc_quirk & RTKQ_IM_HW) ? 1462 RTK_INTRS_IM_HW : RTK_INTRS_CPLUS; 1463 1464 for (;;) { 1465 1466 status = CSR_READ_2(sc, RTK_ISR); 1467 /* If the card has gone away the read returns 0xffff.
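 * (Reads from a PCI device that has been powered off or removed float
 * to all-ones, so 0xffff here means the hardware is gone.)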
*/ 1468 if (status == 0xffff) 1469 break; 1470 if (status) { 1471 handled = 1; 1472 CSR_WRITE_2(sc, RTK_ISR, status); 1473 } 1474 1475 if ((status & status_mask) == 0) 1476 break; 1477 1478 if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR)) 1479 re_rxeof(sc); 1480 1481 if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR | 1482 RTK_ISR_TX_DESC_UNAVAIL | RTK_ISR_TX_OK)) 1483 re_txeof(sc); 1484 1485 if (status & RTK_ISR_SYSTEM_ERR) { 1486 re_init(ifp); 1487 } 1488 1489 if (status & RTK_ISR_LINKCHG) { 1490 callout_stop(&sc->rtk_tick_ch); 1491 re_tick(sc); 1492 } 1493 } 1494 1495 if (handled) 1496 if_schedule_deferred_start(ifp); 1497 1498 rnd_add_uint32(&sc->rnd_source, status); 1499 1500 return handled; 1501 } 1502 1503 1504 1505 /* 1506 * Main transmit routine for C+ and gigE NICs. 1507 */ 1508 1509 static void 1510 re_start(struct ifnet *ifp) 1511 { 1512 struct rtk_softc *sc; 1513 struct mbuf *m; 1514 bus_dmamap_t map; 1515 struct re_txq *txq; 1516 struct re_desc *d; 1517 uint32_t cmdstat, re_flags, vlanctl; 1518 int ofree, idx, error, nsegs, seg; 1519 int startdesc, curdesc, lastdesc; 1520 bool pad; 1521 1522 sc = ifp->if_softc; 1523 ofree = sc->re_ldata.re_txq_free; 1524 1525 for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) { 1526 1527 IFQ_POLL(&ifp->if_snd, m); 1528 if (m == NULL) 1529 break; 1530 1531 if (sc->re_ldata.re_txq_free == 0 || 1532 sc->re_ldata.re_tx_free == 0) { 1533 /* no more free slots left */ 1534 ifp->if_flags |= IFF_OACTIVE; 1535 break; 1536 } 1537 1538 /* 1539 * Set up checksum offload. Note: checksum offload bits must 1540 * appear in all descriptors of a multi-descriptor transmit 1541 * attempt. (This is according to testing done with an 8169 1542 * chip. I'm not sure if this is a requirement or a bug.) 1543 */ 1544 1545 vlanctl = 0; 1546 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) { 1547 uint32_t segsz = m->m_pkthdr.segsz; 1548 1549 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) { 1550 re_flags = RE_TDESC_CMD_LGSEND | 1551 (segsz << RE_TDESC_CMD_MSSVAL_SHIFT); 1552 } else { 1553 re_flags = RE_TDESC_CMD_LGSEND_V4; 1554 vlanctl |= 1555 (segsz << RE_TDESC_VLANCTL_MSSVAL_SHIFT); 1556 } 1557 } else { 1558 /* 1559 * Set RE_TDESC_CMD_IPCSUM if any checksum offloading 1560 * is requested. Otherwise, RE_TDESC_CMD_TCPCSUM/ 1561 * RE_TDESC_CMD_UDPCSUM have no effect. 1562 */ 1563 re_flags = 0; 1564 if ((m->m_pkthdr.csum_flags & 1565 (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) 1566 != 0) { 1567 if ((sc->sc_quirk & RTKQ_DESCV2) == 0) { 1568 re_flags |= RE_TDESC_CMD_IPCSUM; 1569 if (m->m_pkthdr.csum_flags & 1570 M_CSUM_TCPv4) { 1571 re_flags |= 1572 RE_TDESC_CMD_TCPCSUM; 1573 } else if (m->m_pkthdr.csum_flags & 1574 M_CSUM_UDPv4) { 1575 re_flags |= 1576 RE_TDESC_CMD_UDPCSUM; 1577 } 1578 } else { 1579 vlanctl |= RE_TDESC_VLANCTL_IPCSUM; 1580 if (m->m_pkthdr.csum_flags & 1581 M_CSUM_TCPv4) { 1582 vlanctl |= 1583 RE_TDESC_VLANCTL_TCPCSUM; 1584 } else if (m->m_pkthdr.csum_flags & 1585 M_CSUM_UDPv4) { 1586 vlanctl |= 1587 RE_TDESC_VLANCTL_UDPCSUM; 1588 } 1589 } 1590 } 1591 } 1592 1593 txq = &sc->re_ldata.re_txq[idx]; 1594 map = txq->txq_dmamap; 1595 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1596 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1597 1598 if (__predict_false(error)) { 1599 /* XXX try to defrag if EFBIG?
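 * One possible approach, sketched here only as an untested idea, would
 * be to compact the chain with m_defrag(9) and retry the load before
 * giving up; mbuf ownership and the IFQ_POLL/IFQ_DEQUEUE interplay
 * would have to be checked against m_defrag()'s exact semantics:
 *
 *	if (error == EFBIG) {
 *		struct mbuf *mn = m_defrag(m, M_DONTWAIT);
 *		if (mn != NULL) {
 *			m = mn;
 *			error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
 *			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *		}
 *	}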
*/ 1600 printf("%s: can't map mbuf (error %d)\n", 1601 device_xname(sc->sc_dev), error); 1602 1603 IFQ_DEQUEUE(&ifp->if_snd, m); 1604 m_freem(m); 1605 ifp->if_oerrors++; 1606 continue; 1607 } 1608 1609 nsegs = map->dm_nsegs; 1610 pad = false; 1611 if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN && 1612 (re_flags & RE_TDESC_CMD_IPCSUM) != 0 && 1613 (sc->sc_quirk & RTKQ_DESCV2) == 0)) { 1614 pad = true; 1615 nsegs++; 1616 } 1617 1618 if (nsegs > sc->re_ldata.re_tx_free) { 1619 /* 1620 * Not enough free descriptors to transmit this packet. 1621 */ 1622 ifp->if_flags |= IFF_OACTIVE; 1623 bus_dmamap_unload(sc->sc_dmat, map); 1624 break; 1625 } 1626 1627 IFQ_DEQUEUE(&ifp->if_snd, m); 1628 1629 /* 1630 * Make sure that the caches are synchronized before we 1631 * ask the chip to start DMA for the packet data. 1632 */ 1633 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1634 BUS_DMASYNC_PREWRITE); 1635 1636 /* 1637 * Set up hardware VLAN tagging. Note: vlan tag info must 1638 * appear in all descriptors of a multi-descriptor 1639 * transmission attempt. 1640 */ 1641 if (vlan_has_tag(m)) 1642 vlanctl |= bswap16(vlan_get_tag(m)) | 1643 RE_TDESC_VLANCTL_TAG; 1644 1645 /* 1646 * Map the segment array into descriptors. 1647 * Note that we set the start-of-frame and 1648 * end-of-frame markers for either TX or RX, 1649 * but they really only have meaning in the TX case. 1650 * (In the RX case, it's the chip that tells us 1651 * where packets begin and end.) 1652 * We also keep track of the end of the ring 1653 * and set the end-of-ring bits as needed, 1654 * and we set the ownership bits in all except 1655 * the very first descriptor. (The caller will 1656 * set this descriptor later when it starts 1657 * transmission or reception.) 1658 */ 1659 curdesc = startdesc = sc->re_ldata.re_tx_nextfree; 1660 lastdesc = -1; 1661 for (seg = 0; seg < map->dm_nsegs; 1662 seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) { 1663 d = &sc->re_ldata.re_tx_list[curdesc]; 1664 #ifdef DIAGNOSTIC 1665 RE_TXDESCSYNC(sc, curdesc, 1666 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1667 cmdstat = le32toh(d->re_cmdstat); 1668 RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD); 1669 if (cmdstat & RE_TDESC_STAT_OWN) { 1670 panic("%s: tried to map busy TX descriptor", 1671 device_xname(sc->sc_dev)); 1672 } 1673 #endif 1674 1675 d->re_vlanctl = htole32(vlanctl); 1676 re_set_bufaddr(d, map->dm_segs[seg].ds_addr); 1677 cmdstat = re_flags | map->dm_segs[seg].ds_len; 1678 if (seg == 0) 1679 cmdstat |= RE_TDESC_CMD_SOF; 1680 else 1681 cmdstat |= RE_TDESC_CMD_OWN; 1682 if (curdesc == (RE_TX_DESC_CNT(sc) - 1)) 1683 cmdstat |= RE_TDESC_CMD_EOR; 1684 if (seg == nsegs - 1) { 1685 cmdstat |= RE_TDESC_CMD_EOF; 1686 lastdesc = curdesc; 1687 } 1688 d->re_cmdstat = htole32(cmdstat); 1689 RE_TXDESCSYNC(sc, curdesc, 1690 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1691 } 1692 if (__predict_false(pad)) { 1693 d = &sc->re_ldata.re_tx_list[curdesc]; 1694 d->re_vlanctl = htole32(vlanctl); 1695 re_set_bufaddr(d, RE_TXPADDADDR(sc)); 1696 cmdstat = re_flags | 1697 RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF | 1698 (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len); 1699 if (curdesc == (RE_TX_DESC_CNT(sc) - 1)) 1700 cmdstat |= RE_TDESC_CMD_EOR; 1701 d->re_cmdstat = htole32(cmdstat); 1702 RE_TXDESCSYNC(sc, curdesc, 1703 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1704 lastdesc = curdesc; 1705 curdesc = RE_NEXT_TX_DESC(sc, curdesc); 1706 } 1707 KASSERT(lastdesc != -1); 1708 1709 /* Transfer ownership of packet to the chip.
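 * The OWN bit of the first (SOF) descriptor is deliberately set last,
 * below, after every other descriptor of the frame has been written
 * and synced, so the chip can never start DMA on a partially built
 * chain.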
*/ 1710 1711 sc->re_ldata.re_tx_list[startdesc].re_cmdstat |= 1712 htole32(RE_TDESC_CMD_OWN); 1713 RE_TXDESCSYNC(sc, startdesc, 1714 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1715 1716 /* update info of TX queue and descriptors */ 1717 txq->txq_mbuf = m; 1718 txq->txq_descidx = lastdesc; 1719 txq->txq_nsegs = nsegs; 1720 1721 sc->re_ldata.re_txq_free--; 1722 sc->re_ldata.re_tx_free -= nsegs; 1723 sc->re_ldata.re_tx_nextfree = curdesc; 1724 1725 /* 1726 * If there's a BPF listener, bounce a copy of this frame 1727 * to him. 1728 */ 1729 bpf_mtap(ifp, m, BPF_D_OUT); 1730 } 1731 1732 if (sc->re_ldata.re_txq_free < ofree) { 1733 /* 1734 * TX packets are enqueued. 1735 */ 1736 sc->re_ldata.re_txq_prodidx = idx; 1737 1738 /* 1739 * Start the transmitter to poll. 1740 * 1741 * RealTek put the TX poll request register in a different 1742 * location on the 8169 gigE chip. I don't know why. 1743 */ 1744 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) 1745 CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START); 1746 else 1747 CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START); 1748 1749 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) { 1750 /* 1751 * Use the countdown timer for interrupt moderation. 1752 * 'TX done' interrupts are disabled. Instead, we reset 1753 * the countdown timer, which will begin counting until 1754 * it hits the value in the TIMERINT register, and then 1755 * trigger an interrupt. Each time we write to the 1756 * TIMERCNT register, the timer count is reset to 0. 1757 */ 1758 CSR_WRITE_4(sc, RTK_TIMERCNT, 1); 1759 } 1760 1761 /* 1762 * Set a timeout in case the chip goes out to lunch. 1763 */ 1764 ifp->if_timer = 5; 1765 } 1766 } 1767 1768 static int 1769 re_init(struct ifnet *ifp) 1770 { 1771 struct rtk_softc *sc = ifp->if_softc; 1772 uint32_t rxcfg = 0; 1773 uint16_t cfg; 1774 int error; 1775 #ifdef RE_USE_EECMD 1776 const uint8_t *enaddr; 1777 uint32_t reg; 1778 #endif 1779 1780 if ((error = re_enable(sc)) != 0) 1781 goto out; 1782 1783 /* 1784 * Cancel pending I/O and free all RX/TX buffers. 1785 */ 1786 re_stop(ifp, 0); 1787 1788 re_reset(sc); 1789 1790 /* 1791 * Enable C+ RX and TX mode, as well as VLAN stripping and 1792 * RX checksum offload. We must configure the C+ register 1793 * before all others. 1794 */ 1795 cfg = RE_CPLUSCMD_PCI_MRW; 1796 1797 /* 1798 * XXX: For old 8169 set bit 14. 1799 * For 8169S/8110S and above, do not set bit 14. 1800 */ 1801 if ((sc->sc_quirk & RTKQ_8169NONS) != 0) 1802 cfg |= (0x1 << 14); 1803 1804 if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0) 1805 cfg |= RE_CPLUSCMD_VLANSTRIP; 1806 if ((ifp->if_capenable & (IFCAP_CSUM_IPv4_Rx | 1807 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) != 0) 1808 cfg |= RE_CPLUSCMD_RXCSUM_ENB; 1809 if ((sc->sc_quirk & RTKQ_MACSTAT) != 0) { 1810 cfg |= RE_CPLUSCMD_MACSTAT_DIS; 1811 cfg |= RE_CPLUSCMD_TXENB; 1812 } else 1813 cfg |= RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB; 1814 1815 CSR_WRITE_2(sc, RTK_CPLUS_CMD, cfg); 1816 1817 /* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */ 1818 if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) { 1819 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) { 1820 CSR_WRITE_2(sc, RTK_IM, 0x0000); 1821 } else { 1822 CSR_WRITE_2(sc, RTK_IM, 0x5151); 1823 } 1824 } 1825 1826 DELAY(10000); 1827 1828 #ifdef RE_USE_EECMD 1829 /* 1830 * Init our MAC address. Even though the chipset 1831 * documentation doesn't mention it, we need to enter "Config 1832 * register write enable" mode to modify the ID registers. 
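 * (The RTK_EECMD writes below bracket the IDR0/IDR4 updates:
 * RTK_EEMODE_WRITECFG unlocks the ID registers and RTK_EEMODE_OFF
 * locks them again.)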
1833 */ 1834 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG); 1835 enaddr = CLLADDR(ifp->if_sadl); 1836 reg = enaddr[0] | (enaddr[1] << 8) | 1837 (enaddr[2] << 16) | (enaddr[3] << 24); 1838 CSR_WRITE_4(sc, RTK_IDR0, reg); 1839 reg = enaddr[4] | (enaddr[5] << 8); 1840 CSR_WRITE_4(sc, RTK_IDR4, reg); 1841 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF); 1842 #endif 1843 1844 /* 1845 * For C+ mode, initialize the RX descriptors and mbufs. 1846 */ 1847 re_rx_list_init(sc); 1848 re_tx_list_init(sc); 1849 1850 /* 1851 * Load the addresses of the RX and TX lists into the chip. 1852 */ 1853 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI, 1854 RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr)); 1855 CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO, 1856 RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr)); 1857 1858 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI, 1859 RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr)); 1860 CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO, 1861 RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr)); 1862 1863 if (sc->sc_quirk & RTKQ_RXDV_GATED) { 1864 CSR_WRITE_4(sc, RTK_MISC, 1865 CSR_READ_4(sc, RTK_MISC) & ~RTK_MISC_RXDV_GATED_EN); 1866 } 1867 1868 /* 1869 * Enable transmit and receive. 1870 */ 1871 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1872 1873 /* 1874 * Set the initial TX and RX configuration. 1875 */ 1876 if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) { 1877 /* test mode is needed only for old 8169 */ 1878 CSR_WRITE_4(sc, RTK_TXCFG, 1879 RE_TXCFG_CONFIG | RTK_LOOPTEST_ON); 1880 } else 1881 CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG); 1882 1883 CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16); 1884 1885 CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG); 1886 1887 /* Set the individual bit to receive frames for this host only. */ 1888 rxcfg = CSR_READ_4(sc, RTK_RXCFG); 1889 rxcfg |= RTK_RXCFG_RX_INDIV; 1890 1891 /* If we want promiscuous mode, set the allframes bit. */ 1892 if (ifp->if_flags & IFF_PROMISC) 1893 rxcfg |= RTK_RXCFG_RX_ALLPHYS; 1894 else 1895 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS; 1896 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1897 1898 /* 1899 * Set capture broadcast bit to capture broadcast frames. 1900 */ 1901 if (ifp->if_flags & IFF_BROADCAST) 1902 rxcfg |= RTK_RXCFG_RX_BROAD; 1903 else 1904 rxcfg &= ~RTK_RXCFG_RX_BROAD; 1905 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1906 1907 /* 1908 * Program the multicast filter, if necessary. 1909 */ 1910 rtk_setmulti(sc); 1911 1912 /* 1913 * Enable interrupts. 1914 */ 1915 if (sc->re_testmode) 1916 CSR_WRITE_2(sc, RTK_IMR, 0); 1917 else if ((sc->sc_quirk & RTKQ_IM_HW) != 0) 1918 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_IM_HW); 1919 else 1920 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS); 1921 1922 /* Start RX/TX process. */ 1923 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0); 1924 #ifdef notdef 1925 /* Enable receiver and transmitter. */ 1926 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB); 1927 #endif 1928 1929 /* 1930 * Initialize the timer interrupt register so that 1931 * a timer interrupt will be generated once the timer 1932 * reaches a certain number of ticks. The timer is 1933 * reloaded on each transmit. This gives us TX interrupt 1934 * moderation, which dramatically improves TX frame rate. 
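 * (Concretely: re_start() and re_txeof() arm the countdown by writing
 * 1 to RTK_TIMERCNT; when the count reaches the value programmed into
 * the TIMERINT register below, the chip raises RTK_ISR_TIMEOUT_EXPIRED
 * and re_intr() calls re_txeof() to reap the completed descriptors.)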
1935 */ 1936 1937 if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) 1938 CSR_WRITE_4(sc, RTK_TIMERINT, 0x400); 1939 else { 1940 if ((sc->sc_quirk & RTKQ_IM_HW) == 0) { 1941 if ((sc->sc_quirk & RTKQ_PCIE) != 0) { 1942 CSR_WRITE_4(sc, RTK_TIMERINT_8169, 15000); 1943 } else { 1944 CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0x800); 1945 } 1946 } else { 1947 CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0); 1948 } 1949 1950 /* 1951 * For 8169 gigE NICs, set the max allowed RX packet 1952 * size so we can receive jumbo frames. 1953 */ 1954 CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383); 1955 } 1956 1957 if (sc->re_testmode) 1958 return 0; 1959 1960 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD); 1961 1962 ifp->if_flags |= IFF_RUNNING; 1963 ifp->if_flags &= ~IFF_OACTIVE; 1964 1965 callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc); 1966 1967 out: 1968 if (error) { 1969 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1970 ifp->if_timer = 0; 1971 printf("%s: interface not running\n", 1972 device_xname(sc->sc_dev)); 1973 } 1974 1975 return error; 1976 } 1977 1978 static int 1979 re_ioctl(struct ifnet *ifp, u_long command, void *data) 1980 { 1981 struct rtk_softc *sc = ifp->if_softc; 1982 struct ifreq *ifr = data; 1983 int s, error = 0; 1984 1985 s = splnet(); 1986 1987 switch (command) { 1988 case SIOCSIFMTU: 1989 /* 1990 * Disable jumbo frames if it's not supported. 1991 */ 1992 if ((sc->sc_quirk & RTKQ_NOJUMBO) != 0 && 1993 ifr->ifr_mtu > ETHERMTU) { 1994 error = EINVAL; 1995 break; 1996 } 1997 1998 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO) 1999 error = EINVAL; 2000 else if ((error = ifioctl_common(ifp, command, data)) == 2001 ENETRESET) 2002 error = 0; 2003 break; 2004 default: 2005 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 2006 break; 2007 2008 error = 0; 2009 2010 if (command == SIOCSIFCAP) 2011 error = (*ifp->if_init)(ifp); 2012 else if (command != SIOCADDMULTI && command != SIOCDELMULTI) 2013 ; 2014 else if (ifp->if_flags & IFF_RUNNING) 2015 rtk_setmulti(sc); 2016 break; 2017 } 2018 2019 splx(s); 2020 2021 return error; 2022 } 2023 2024 static void 2025 re_watchdog(struct ifnet *ifp) 2026 { 2027 struct rtk_softc *sc; 2028 int s; 2029 2030 sc = ifp->if_softc; 2031 s = splnet(); 2032 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2033 ifp->if_oerrors++; 2034 2035 re_txeof(sc); 2036 re_rxeof(sc); 2037 2038 re_init(ifp); 2039 2040 splx(s); 2041 } 2042 2043 /* 2044 * Stop the adapter and free any mbufs allocated to the 2045 * RX and TX lists. 2046 */ 2047 static void 2048 re_stop(struct ifnet *ifp, int disable) 2049 { 2050 int i; 2051 struct rtk_softc *sc = ifp->if_softc; 2052 2053 callout_stop(&sc->rtk_tick_ch); 2054 2055 mii_down(&sc->mii); 2056 2057 if ((sc->sc_quirk & RTKQ_CMDSTOP) != 0) 2058 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_STOPREQ | RTK_CMD_TX_ENB | 2059 RTK_CMD_RX_ENB); 2060 else 2061 CSR_WRITE_1(sc, RTK_COMMAND, 0x00); 2062 DELAY(1000); 2063 CSR_WRITE_2(sc, RTK_IMR, 0x0000); 2064 CSR_WRITE_2(sc, RTK_ISR, 0xFFFF); 2065 2066 if (sc->re_head != NULL) { 2067 m_freem(sc->re_head); 2068 sc->re_head = sc->re_tail = NULL; 2069 } 2070 2071 /* Free the TX list buffers. */ 2072 for (i = 0; i < RE_TX_QLEN; i++) { 2073 if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) { 2074 bus_dmamap_unload(sc->sc_dmat, 2075 sc->re_ldata.re_txq[i].txq_dmamap); 2076 m_freem(sc->re_ldata.re_txq[i].txq_mbuf); 2077 sc->re_ldata.re_txq[i].txq_mbuf = NULL; 2078 } 2079 } 2080 2081 /* Free the RX list buffers. 
*/ 2082 for (i = 0; i < RE_RX_DESC_CNT; i++) { 2083 if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) { 2084 bus_dmamap_unload(sc->sc_dmat, 2085 sc->re_ldata.re_rxsoft[i].rxs_dmamap); 2086 m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf); 2087 sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL; 2088 } 2089 } 2090 2091 if (disable) 2092 re_disable(sc); 2093 2094 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2095 ifp->if_timer = 0; 2096 } 2097