1 /* $OpenBSD: re.c,v 1.104 2009/02/12 11:55:29 martynas Exp $ */ 2 /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 /* 36 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * This driver is designed to support RealTek's next generation of 45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 48 * 49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 50 * with the older 8139 family, however it also supports a special 51 * C+ mode of operation that provides several new performance enhancing 52 * features. These include: 53 * 54 * o Descriptor based DMA mechanism. Each descriptor represents 55 * a single packet fragment. Data buffers may be aligned on 56 * any byte boundary. 57 * 58 * o 64-bit DMA 59 * 60 * o TCP/IP checksum offload for both RX and TX 61 * 62 * o High and normal priority transmit DMA rings 63 * 64 * o VLAN tag insertion and extraction 65 * 66 * o TCP large send (segmentation offload) 67 * 68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 69 * programming API is fairly straightforward. The RX filtering, EEPROM 70 * access and PHY access is the same as it is on the older 8139 series 71 * chips. 72 * 73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 74 * same programming API and feature set as the 8139C+ with the following 75 * differences and additions: 76 * 77 * o 1000Mbps mode 78 * 79 * o Jumbo frames 80 * 81 * o GMII and TBI ports/registers for interfacing with copper 82 * or fiber PHYs 83 * 84 * o RX and TX DMA rings can have up to 1024 descriptors 85 * (the 8139C+ allows a maximum of 64) 86 * 87 * o Slight differences in register layout from the 8139C+ 88 * 89 * The TX start and timer interrupt registers are at different locations 90 * on the 8169 than they are on the 8139C+. 
Also, the status word in the 91 * RX descriptor has a slightly different bit layout. The 8169 does not 92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 93 * copper gigE PHY. 94 * 95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 96 * (the 'S' stands for 'single-chip'). These devices have the same 97 * programming API as the older 8169, but also have some vendor-specific 98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 99 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 100 * 101 * This driver takes advantage of the RX and TX checksum offload and 102 * VLAN tag insertion/extraction features. It also implements TX 103 * interrupt moderation using the timer interrupt registers, which 104 * significantly reduces TX interrupt load. There is also support 105 * for jumbo frames, however the 8169/8169S/8110S can not transmit 106 * jumbo frames larger than 7440, so the max MTU possible with this 107 * driver is 7422 bytes. 
108 */ 109 110 #include "bpfilter.h" 111 #include "vlan.h" 112 113 #include <sys/param.h> 114 #include <sys/endian.h> 115 #include <sys/systm.h> 116 #include <sys/sockio.h> 117 #include <sys/mbuf.h> 118 #include <sys/malloc.h> 119 #include <sys/kernel.h> 120 #include <sys/device.h> 121 #include <sys/timeout.h> 122 #include <sys/socket.h> 123 124 #include <net/if.h> 125 #include <net/if_dl.h> 126 #include <net/if_media.h> 127 128 #ifdef INET 129 #include <netinet/in.h> 130 #include <netinet/in_systm.h> 131 #include <netinet/in_var.h> 132 #include <netinet/ip.h> 133 #include <netinet/if_ether.h> 134 #endif 135 136 #if NVLAN > 0 137 #include <net/if_types.h> 138 #include <net/if_vlan_var.h> 139 #endif 140 141 #if NBPFILTER > 0 142 #include <net/bpf.h> 143 #endif 144 145 #include <dev/mii/mii.h> 146 #include <dev/mii/miivar.h> 147 148 #include <dev/pci/pcireg.h> 149 #include <dev/pci/pcivar.h> 150 151 #include <dev/ic/rtl81x9reg.h> 152 #include <dev/ic/revar.h> 153 154 #ifdef RE_DEBUG 155 int redebug = 0; 156 #define DPRINTF(x) do { if (redebug) printf x; } while (0) 157 #else 158 #define DPRINTF(x) 159 #endif 160 161 static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); 162 163 int re_encap(struct rl_softc *, struct mbuf *, int *); 164 165 int re_newbuf(struct rl_softc *, int, struct mbuf *); 166 int re_rx_list_init(struct rl_softc *); 167 int re_tx_list_init(struct rl_softc *); 168 int re_rxeof(struct rl_softc *); 169 int re_txeof(struct rl_softc *); 170 void re_tick(void *); 171 void re_start(struct ifnet *); 172 int re_ioctl(struct ifnet *, u_long, caddr_t); 173 void re_watchdog(struct ifnet *); 174 int re_ifmedia_upd(struct ifnet *); 175 void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176 177 void re_eeprom_putbyte(struct rl_softc *, int); 178 void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); 179 void re_read_eeprom(struct rl_softc *, caddr_t, int, int); 180 181 int re_gmii_readreg(struct device *, int, int); 182 void 
re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);
void	re_reset(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);

#ifdef RE_DIAG
int	re_diag(struct rl_softc *);
#endif

struct cfdriver re_cd = {
	0, "re", DV_IFNET
};

/* Set/clear bits in the EEPROM command register (expects "sc" in scope). */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Map of hardware revision codes (from RL_TXCFG) to printable chip
 * names; terminated by a { 0, NULL } sentinel.
 */
static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E_SPIN1,	"RTL8100E 1" },
	{ RL_HWREV_8100E_SPIN2,	"RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};


static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	/* Store a DMA buffer address in a descriptor, split into lo/hi words. */
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	/* Combine the READ opcode with the word address. */
	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The result is stored through 'dest'.
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
313 */ 314 void 315 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 316 { 317 int i; 318 u_int16_t word = 0, *ptr; 319 320 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 321 322 DELAY(100); 323 324 for (i = 0; i < cnt; i++) { 325 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 326 re_eeprom_getword(sc, off + i, &word); 327 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 328 ptr = (u_int16_t *)(dest + (i * 2)); 329 *ptr = word; 330 } 331 332 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 333 } 334 335 int 336 re_gmii_readreg(struct device *self, int phy, int reg) 337 { 338 struct rl_softc *sc = (struct rl_softc *)self; 339 u_int32_t rval; 340 int i; 341 342 if (phy != 7) 343 return (0); 344 345 /* Let the rgephy driver read the GMEDIASTAT register */ 346 347 if (reg == RL_GMEDIASTAT) { 348 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 349 return (rval); 350 } 351 352 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 353 DELAY(1000); 354 355 for (i = 0; i < RL_TIMEOUT; i++) { 356 rval = CSR_READ_4(sc, RL_PHYAR); 357 if (rval & RL_PHYAR_BUSY) 358 break; 359 DELAY(100); 360 } 361 362 if (i == RL_TIMEOUT) { 363 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); 364 return (0); 365 } 366 367 return (rval & RL_PHYAR_PHYDATA); 368 } 369 370 void 371 re_gmii_writereg(struct device *dev, int phy, int reg, int data) 372 { 373 struct rl_softc *sc = (struct rl_softc *)dev; 374 u_int32_t rval; 375 int i; 376 377 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 378 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 379 DELAY(1000); 380 381 for (i = 0; i < RL_TIMEOUT; i++) { 382 rval = CSR_READ_4(sc, RL_PHYAR); 383 if (!(rval & RL_PHYAR_BUSY)) 384 break; 385 DELAY(100); 386 } 387 388 if (i == RL_TIMEOUT) 389 printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname); 390 } 391 392 int 393 re_miibus_readreg(struct device *dev, int phy, int reg) 394 { 395 struct rl_softc *sc = (struct rl_softc *)dev; 396 u_int16_t rval = 0; 397 u_int16_t re8139_reg = 0; 398 int s; 399 400 s = splnet(); 401 402 if (sc->sc_hwrev != 
RL_HWREV_8139CPLUS) { 403 rval = re_gmii_readreg(dev, phy, reg); 404 splx(s); 405 return (rval); 406 } 407 408 /* Pretend the internal PHY is only at address 0 */ 409 if (phy) { 410 splx(s); 411 return (0); 412 } 413 switch(reg) { 414 case MII_BMCR: 415 re8139_reg = RL_BMCR; 416 break; 417 case MII_BMSR: 418 re8139_reg = RL_BMSR; 419 break; 420 case MII_ANAR: 421 re8139_reg = RL_ANAR; 422 break; 423 case MII_ANER: 424 re8139_reg = RL_ANER; 425 break; 426 case MII_ANLPAR: 427 re8139_reg = RL_LPAR; 428 break; 429 case MII_PHYIDR1: 430 case MII_PHYIDR2: 431 splx(s); 432 return (0); 433 /* 434 * Allow the rlphy driver to read the media status 435 * register. If we have a link partner which does not 436 * support NWAY, this is the register which will tell 437 * us the results of parallel detection. 438 */ 439 case RL_MEDIASTAT: 440 rval = CSR_READ_1(sc, RL_MEDIASTAT); 441 splx(s); 442 return (rval); 443 default: 444 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 445 splx(s); 446 return (0); 447 } 448 rval = CSR_READ_2(sc, re8139_reg); 449 if (re8139_reg == RL_BMCR) { 450 /* 8139C+ has different bit layout. */ 451 rval &= ~(BMCR_LOOP | BMCR_ISO); 452 } 453 splx(s); 454 return (rval); 455 } 456 457 void 458 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 459 { 460 struct rl_softc *sc = (struct rl_softc *)dev; 461 u_int16_t re8139_reg = 0; 462 int s; 463 464 s = splnet(); 465 466 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 467 re_gmii_writereg(dev, phy, reg, data); 468 splx(s); 469 return; 470 } 471 472 /* Pretend the internal PHY is only at address 0 */ 473 if (phy) { 474 splx(s); 475 return; 476 } 477 switch(reg) { 478 case MII_BMCR: 479 re8139_reg = RL_BMCR; 480 /* 8139C+ has different bit layout. 
*/ 481 data &= ~(BMCR_LOOP | BMCR_ISO); 482 break; 483 case MII_BMSR: 484 re8139_reg = RL_BMSR; 485 break; 486 case MII_ANAR: 487 re8139_reg = RL_ANAR; 488 break; 489 case MII_ANER: 490 re8139_reg = RL_ANER; 491 break; 492 case MII_ANLPAR: 493 re8139_reg = RL_LPAR; 494 break; 495 case MII_PHYIDR1: 496 case MII_PHYIDR2: 497 splx(s); 498 return; 499 break; 500 default: 501 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 502 splx(s); 503 return; 504 } 505 CSR_WRITE_2(sc, re8139_reg, data); 506 splx(s); 507 } 508 509 void 510 re_miibus_statchg(struct device *dev) 511 { 512 } 513 514 void 515 re_iff(struct rl_softc *sc) 516 { 517 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 518 int h = 0; 519 u_int32_t hashes[2] = { 0, 0 }; 520 u_int32_t rxfilt; 521 int mcnt = 0; 522 struct arpcom *ac = &sc->sc_arpcom; 523 struct ether_multi *enm; 524 struct ether_multistep step; 525 526 rxfilt = CSR_READ_4(sc, RL_RXCFG); 527 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_MULTI); 528 ifp->if_flags &= ~IFF_ALLMULTI; 529 530 if (ifp->if_flags & IFF_PROMISC || 531 ac->ac_multirangecnt > 0) { 532 ifp ->if_flags |= IFF_ALLMULTI; 533 rxfilt |= RL_RXCFG_RX_MULTI; 534 if (ifp->if_flags & IFF_PROMISC) 535 rxfilt |= RL_RXCFG_RX_ALLPHYS; 536 hashes[0] = hashes[1] = 0xFFFFFFFF; 537 } else { 538 /* first, zot all the existing hash bits */ 539 CSR_WRITE_4(sc, RL_MAR0, 0); 540 CSR_WRITE_4(sc, RL_MAR4, 0); 541 542 /* now program new ones */ 543 ETHER_FIRST_MULTI(step, ac, enm); 544 while (enm != NULL) { 545 h = ether_crc32_be(enm->enm_addrlo, 546 ETHER_ADDR_LEN) >> 26; 547 if (h < 32) 548 hashes[0] |= (1 << h); 549 else 550 hashes[1] |= (1 << (h - 32)); 551 mcnt++; 552 ETHER_NEXT_MULTI(step, enm); 553 } 554 555 if (mcnt) 556 rxfilt |= RL_RXCFG_RX_MULTI; 557 } 558 559 /* 560 * For some unfathomable reason, RealTek decided to reverse 561 * the order of the multicast hash registers in the PCI Express 562 * parts. 
This means we have to write the hash pattern in reverse 563 * order for those devices. 564 */ 565 if (sc->rl_flags & RL_FLAG_INVMAR) { 566 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 567 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 568 } else { 569 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 570 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 571 } 572 573 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 574 } 575 576 void 577 re_reset(struct rl_softc *sc) 578 { 579 int i; 580 581 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 582 583 for (i = 0; i < RL_TIMEOUT; i++) { 584 DELAY(10); 585 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 586 break; 587 } 588 if (i == RL_TIMEOUT) 589 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 590 591 if (sc->rl_flags & RL_FLAG_MACLDPS) 592 CSR_WRITE_1(sc, RL_LDPS, 1); 593 } 594 595 #ifdef RE_DIAG 596 597 /* 598 * The following routine is designed to test for a defect on some 599 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 600 * lines connected to the bus, however for a 32-bit only card, they 601 * should be pulled high. The result of this defect is that the 602 * NIC will not work right if you plug it into a 64-bit slot: DMA 603 * operations will be done with 64-bit transfers, which will fail 604 * because the 64-bit data lines aren't connected. 605 * 606 * There's no way to work around this (short of talking a soldering 607 * iron to the board), however we can detect it. The method we use 608 * here is to put the NIC into digital loopback mode, set the receiver 609 * to promiscuous mode, and then try to send a frame. We then compare 610 * the frame data we sent to what was received. If the data matches, 611 * then the NIC is working correctly, otherwise we know the user has 612 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 613 * slot. In the latter case, there's no way the NIC can work correctly, 614 * so we print out a message on the console and abort the device attach. 
 */

int
re_diag(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_rxsoft	*rxs;
	struct rl_desc		*cur_rx;
	bus_dmamap_t		dmamap;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, s, error = 0, phyaddr;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	DPRINTF(("inside re_diag\n"));
	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_reset(sc);
	re_init(ifp);
	sc->rl_flags |= RL_FLAG_LINK;
	/* The internal PHY sits at address 0 on 8139C+, 1 elsewhere. */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		phyaddr = 0;
	else
		phyaddr = 1;

	/* Reset the PHY and wait for the reset bit to self-clear. */
	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg((struct device *)sc,
		    phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m0, NULL, error);
	re_start(ifp);
	splx(s);
	m0 = NULL;	/* ownership passed to the send queue */

	DPRINTF(("re_diag: transmission started\n"));

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RL_TIMEOUT) {
		printf("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	rxs = &sc->rl_ldata.rl_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RL_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	rxstat = letoh32(cur_rx->rl_cmdstat);
	total_len = rxstat & sc->rl_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		printf("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	DPRINTF(("re_diag: packet received\n"));

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("%s: WARNING, DMA FAILURE!\n", sc->sc_dev.dv_xname);
		printf("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		printf("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		printf("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		printf("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		printf("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		printf("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		printf("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */
	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 1);
	if (m0 != NULL)
		m_freem(m0);
	DPRINTF(("leaving re_diag\n"));

	return (error);
}

#endif

#ifdef __armish__
/*
 * Thecus N2100 doesn't store the full mac address in eeprom
 * so we read the old mac address from the device before the reset
 * in hopes that the proper mac address is already there.
 */
union {
	u_int32_t	eaddr_word[2];
	u_char		eaddr[ETHER_ADDR_LEN];
} boot_eaddr;
int boot_eaddr_valid;
#endif /* __armish__ */
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	const struct re_revision *rr;
	const char	*re_name = NULL;

	/* Identify the hardware revision from the TX config register. */
	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

	/* Set per-revision quirk flags. */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_NOJUMBO;
		break;
	case RL_HWREV_8100E_SPIN1:
	case RL_HWREV_8100E_SPIN2:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
		    RL_FLAG_PHYWAKE;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
		    RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168_SPIN1:
	case RL_HWREV_8168_SPIN2:
	case RL_HWREV_8168_SPIN3:
		sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
		    RL_FLAG_MACSTAT | RL_FLAG_HWIM;
		break;
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_HWIM;
		/*
		 * These controllers support jumbo frame but it seems
		 * that enabling it requires touching additional magic
		 * registers. Depending on MAC revisions some
		 * controllers need to disable checksum offload. So
		 * disable jumbo frame until I have better idea what
		 * it really requires to make it support.
		 * RTL8168C/CP : supports up to 6KB jumbo frame.
		 * RTL8111C/CP : supports up to 9KB jumbo frame.
		 */
		sc->rl_flags |= RL_FLAG_NOJUMBO;
		break;
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACLDPS;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SCd:
	case RL_HWREV_8169_8110SCe:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACLDPS;
		break;
	default:
		break;
	}

	/* Reset the adapter. */
	re_reset(sc);

	/* Default interrupt moderation parameters. */
	sc->rl_tx_time = 5;		/* 125us */
	sc->rl_rx_time = 2;		/* 50us */
	if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_sim_time = 75;	/* 75us */
	else
		sc->rl_sim_time = 125;	/* 125us */
	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		sc->rl_bus_speed = 33; /* XXX */
	else if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_bus_speed = 125;
	else {
		u_int8_t cfg2;

		cfg2 = CSR_READ_1(sc, RL_CFG2);
		switch (cfg2 & RL_CFG2_PCI_MASK) {
		case RL_CFG2_PCI_33MHZ:
			sc->rl_bus_speed = 33;
			break;
		case RL_CFG2_PCI_66MHZ:
			sc->rl_bus_speed = 66;
			break;
		default:
			printf("%s: unknown bus speed, assume 33MHz\n",
			    sc->sc_dev.dv_xname);
			sc->rl_bus_speed = 33;
			break;
		}

		if (cfg2 & RL_CFG2_PCI_64BIT)
			sc->rl_flags |= RL_FLAG_PCI64;
	}

	re_config_imtype(sc, sc->rl_imtype);

	if (sc->rl_flags & RL_FLAG_PAR) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		/* Probe EEPROM width: a 93C56 answers 0x8129 at word 0. */
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = letoh16(as[i]);
		bcopy(as, eaddr, sizeof(eaddr));

#ifdef __armish__
		/*
		 * On the Thecus N2100, the MAC address in the EEPROM is
		 * always 00:14:fd:10:00:00. The proper MAC address is
		 * stored in flash. Fortunately RedBoot configures the
		 * proper MAC address (for the first onboard interface)
		 * which we can read from the IDR.
		 */
		if (eaddr[0] == 0x00 && eaddr[1] == 0x14 &&
		    eaddr[2] == 0xfd && eaddr[3] == 0x10 &&
		    eaddr[4] == 0x00 && eaddr[5] == 0x00) {
			if (boot_eaddr_valid == 0) {
				boot_eaddr.eaddr_word[1] =
				    letoh32(CSR_READ_4(sc, RL_IDR4));
				boot_eaddr.eaddr_word[0] =
				    letoh32(CSR_READ_4(sc, RL_IDR0));
				boot_eaddr_valid = 1;
			}

			bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr));
			eaddr[5] += sc->sc_dev.dv_unit;
		}
#endif
	}

	/*
	 * Set RX length mask, TX poll request register
	 * and TX descriptor count.
	 */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Look up a printable chip name for the attach message. */
	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == sc->sc_hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* The TX ring must fit within a single page. */
	if (sc->rl_ldata.rl_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct rl_desc)) {
		sc->rl_ldata.rl_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct rl_desc);
	}

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
	    RL_TX_LIST_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RL_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN,
		    RL_TX_DESC_CNT(sc) - RL_NTXDESC_RSVD, RL_TDESC_CMD_FRAGLEN,
		    0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ,
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate rx listnseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ,
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;

	}
	memset(sc->rl_ldata.rl_rx_list, 0, RL_RX_DMAMEM_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1,
	    RL_RX_DMAMEM_SZ, 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	/* Initialize the network interface structure. */
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	if ((sc->rl_flags & RL_FLAG_NOJUMBO) == 0)
		ifp->if_hardmtu = RL_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->timer_handle, re_tick, sc);

	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	re_reset(sc);
	if_attach(ifp);
	ether_ifattach(ifp);

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */
	if (sc->sc_hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			printf("%s: attach aborted due to hardware diag failure\n",
			    sc->sc_dev.dv_xname);
			ether_ifdetach(ifp);
			goto fail_8;
		}
	}
#endif

	return (0);

	/* Unwind resource acquisition in reverse order on failure. */
fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}


int
re_newbuf(struct rl_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf	*n = NULL;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return (ENOBUFS);

		MCLGET(n, M_DONTWAIT);
		if (!(n->m_flags & M_EXT)) {
			m_freem(n);
			return (ENOBUFS);
		}
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
1223 */ 1224 m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN; 1225 m->m_data += RE_ETHER_ALIGN; 1226 1227 rxs = &sc->rl_ldata.rl_rxsoft[idx]; 1228 map = rxs->rxs_dmamap; 1229 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1230 BUS_DMA_READ|BUS_DMA_NOWAIT); 1231 1232 if (error) 1233 goto out; 1234 1235 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1236 BUS_DMASYNC_PREREAD); 1237 1238 d = &sc->rl_ldata.rl_rx_list[idx]; 1239 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1240 cmdstat = letoh32(d->rl_cmdstat); 1241 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1242 if (cmdstat & RL_RDESC_STAT_OWN) { 1243 printf("%s: tried to map busy RX descriptor\n", 1244 sc->sc_dev.dv_xname); 1245 goto out; 1246 } 1247 1248 rxs->rxs_mbuf = m; 1249 1250 d->rl_vlanctl = 0; 1251 cmdstat = map->dm_segs[0].ds_len; 1252 if (idx == (RL_RX_DESC_CNT - 1)) 1253 cmdstat |= RL_RDESC_CMD_EOR; 1254 re_set_bufaddr(d, map->dm_segs[0].ds_addr); 1255 d->rl_cmdstat = htole32(cmdstat); 1256 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1257 cmdstat |= RL_RDESC_CMD_OWN; 1258 d->rl_cmdstat = htole32(cmdstat); 1259 RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1260 1261 return (0); 1262 out: 1263 if (n != NULL) 1264 m_freem(n); 1265 return (ENOMEM); 1266 } 1267 1268 1269 int 1270 re_tx_list_init(struct rl_softc *sc) 1271 { 1272 int i; 1273 1274 memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc)); 1275 for (i = 0; i < RL_TX_QLEN; i++) { 1276 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 1277 } 1278 1279 bus_dmamap_sync(sc->sc_dmat, 1280 sc->rl_ldata.rl_tx_list_map, 0, 1281 sc->rl_ldata.rl_tx_list_map->dm_mapsize, 1282 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1283 sc->rl_ldata.rl_txq_prodidx = 0; 1284 sc->rl_ldata.rl_txq_considx = 0; 1285 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc); 1286 sc->rl_ldata.rl_tx_nextfree = 0; 1287 1288 return (0); 1289 } 1290 1291 int 1292 re_rx_list_init(struct rl_softc *sc) 1293 { 1294 int i; 1295 1296 
memset((char *)sc->rl_ldata.rl_rx_list, 0, RL_RX_LIST_SZ); 1297 1298 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1299 if (re_newbuf(sc, i, NULL) == ENOBUFS) 1300 return (ENOBUFS); 1301 } 1302 1303 sc->rl_ldata.rl_rx_prodidx = 0; 1304 sc->rl_head = sc->rl_tail = NULL; 1305 1306 return (0); 1307 } 1308 1309 /* 1310 * RX handler for C+ and 8169. For the gigE chips, we support 1311 * the reception of jumbo frames that have been fragmented 1312 * across multiple 2K mbuf cluster buffers. 1313 */ 1314 int 1315 re_rxeof(struct rl_softc *sc) 1316 { 1317 struct mbuf *m; 1318 struct ifnet *ifp; 1319 int i, total_len, rx = 0; 1320 struct rl_desc *cur_rx; 1321 struct rl_rxsoft *rxs; 1322 u_int32_t rxstat, rxvlan; 1323 1324 ifp = &sc->sc_arpcom.ac_if; 1325 1326 for (i = sc->rl_ldata.rl_rx_prodidx;; i = RL_NEXT_RX_DESC(sc, i)) { 1327 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1328 RL_RXDESCSYNC(sc, i, 1329 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1330 rxstat = letoh32(cur_rx->rl_cmdstat); 1331 rxvlan = letoh32(cur_rx->rl_vlanctl); 1332 RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD); 1333 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 1334 break; 1335 total_len = rxstat & sc->rl_rxlenmask; 1336 rxs = &sc->rl_ldata.rl_rxsoft[i]; 1337 m = rxs->rxs_mbuf; 1338 rx = 1; 1339 1340 /* Invalidate the RX mbuf and unload its map */ 1341 1342 bus_dmamap_sync(sc->sc_dmat, 1343 rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize, 1344 BUS_DMASYNC_POSTREAD); 1345 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1346 1347 if (!(rxstat & RL_RDESC_STAT_EOF)) { 1348 m->m_len = RE_RX_DESC_BUFLEN; 1349 if (sc->rl_head == NULL) 1350 sc->rl_head = sc->rl_tail = m; 1351 else { 1352 m->m_flags &= ~M_PKTHDR; 1353 sc->rl_tail->m_next = m; 1354 sc->rl_tail = m; 1355 } 1356 re_newbuf(sc, i, NULL); 1357 continue; 1358 } 1359 1360 /* 1361 * NOTE: for the 8139C+, the frame length field 1362 * is always 12 bits in size, but for the gigE chips, 1363 * it is 13 bits (since the max RX frame length is 16K). 
1364 * Unfortunately, all 32 bits in the status word 1365 * were already used, so to make room for the extra 1366 * length bit, RealTek took out the 'frame alignment 1367 * error' bit and shifted the other status bits 1368 * over one slot. The OWN, EOR, FS and LS bits are 1369 * still in the same places. We have already extracted 1370 * the frame length and checked the OWN bit, so rather 1371 * than using an alternate bit mapping, we shift the 1372 * status bits one space to the right so we can evaluate 1373 * them using the 8169 status as though it was in the 1374 * same format as that of the 8139C+. 1375 */ 1376 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 1377 rxstat >>= 1; 1378 1379 /* 1380 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 1381 * set, but if CRC is clear, it will still be a valid frame. 1382 */ 1383 if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 && 1384 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) { 1385 ifp->if_ierrors++; 1386 /* 1387 * If this is part of a multi-fragment packet, 1388 * discard all the pieces. 1389 */ 1390 if (sc->rl_head != NULL) { 1391 m_freem(sc->rl_head); 1392 sc->rl_head = sc->rl_tail = NULL; 1393 } 1394 re_newbuf(sc, i, m); 1395 continue; 1396 } 1397 1398 /* 1399 * If allocating a replacement mbuf fails, 1400 * reload the current one. 1401 */ 1402 1403 if (re_newbuf(sc, i, NULL)) { 1404 ifp->if_ierrors++; 1405 if (sc->rl_head != NULL) { 1406 m_freem(sc->rl_head); 1407 sc->rl_head = sc->rl_tail = NULL; 1408 } 1409 re_newbuf(sc, i, m); 1410 continue; 1411 } 1412 1413 if (sc->rl_head != NULL) { 1414 m->m_len = total_len % RE_RX_DESC_BUFLEN; 1415 if (m->m_len == 0) 1416 m->m_len = RE_RX_DESC_BUFLEN; 1417 /* 1418 * Special case: if there's 4 bytes or less 1419 * in this buffer, the mbuf can be discarded: 1420 * the last 4 bytes is the CRC, which we don't 1421 * care about anyway. 
1422 */ 1423 if (m->m_len <= ETHER_CRC_LEN) { 1424 sc->rl_tail->m_len -= 1425 (ETHER_CRC_LEN - m->m_len); 1426 m_freem(m); 1427 } else { 1428 m->m_len -= ETHER_CRC_LEN; 1429 m->m_flags &= ~M_PKTHDR; 1430 sc->rl_tail->m_next = m; 1431 } 1432 m = sc->rl_head; 1433 sc->rl_head = sc->rl_tail = NULL; 1434 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1435 } else 1436 m->m_pkthdr.len = m->m_len = 1437 (total_len - ETHER_CRC_LEN); 1438 1439 ifp->if_ipackets++; 1440 m->m_pkthdr.rcvif = ifp; 1441 1442 /* Do RX checksumming */ 1443 1444 if (sc->rl_flags & RL_FLAG_DESCV2) { 1445 /* Check IP header checksum */ 1446 if ((rxstat & RL_RDESC_STAT_PROTOID) && 1447 !(rxstat & RL_RDESC_STAT_IPSUMBAD) && 1448 (rxvlan & RL_RDESC_IPV4)) 1449 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1450 1451 /* Check TCP/UDP checksum */ 1452 if (((rxstat & RL_RDESC_STAT_TCP) && 1453 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1454 ((rxstat & RL_RDESC_STAT_UDP) && 1455 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) 1456 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1457 M_UDP_CSUM_IN_OK; 1458 } else { 1459 /* Check IP header checksum */ 1460 if ((rxstat & RL_RDESC_STAT_PROTOID) && 1461 !(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1462 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 1463 1464 /* Check TCP/UDP checksum */ 1465 if ((RL_TCPPKT(rxstat) && 1466 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1467 (RL_UDPPKT(rxstat) && 1468 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) 1469 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 1470 M_UDP_CSUM_IN_OK; 1471 } 1472 #if NVLAN > 0 1473 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 1474 m->m_pkthdr.ether_vtag = 1475 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)); 1476 m->m_flags |= M_VLANTAG; 1477 } 1478 #endif 1479 1480 #if NBPFILTER > 0 1481 if (ifp->if_bpf) 1482 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); 1483 #endif 1484 ether_input_mbuf(ifp, m); 1485 } 1486 1487 sc->rl_ldata.rl_rx_prodidx = i; 1488 1489 return (rx); 1490 } 1491 1492 int 1493 re_txeof(struct rl_softc *sc) 1494 { 1495 struct ifnet *ifp; 
1496 struct rl_txq *txq; 1497 uint32_t txstat; 1498 int idx, descidx, tx = 0; 1499 1500 ifp = &sc->sc_arpcom.ac_if; 1501 1502 for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) { 1503 txq = &sc->rl_ldata.rl_txq[idx]; 1504 1505 if (txq->txq_mbuf == NULL) { 1506 KASSERT(idx == sc->rl_ldata.rl_txq_prodidx); 1507 break; 1508 } 1509 1510 descidx = txq->txq_descidx; 1511 RL_TXDESCSYNC(sc, descidx, 1512 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1513 txstat = 1514 letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat); 1515 RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD); 1516 KASSERT((txstat & RL_TDESC_CMD_EOF) != 0); 1517 if (txstat & RL_TDESC_CMD_OWN) 1518 break; 1519 1520 tx = 1; 1521 sc->rl_ldata.rl_tx_free += txq->txq_nsegs; 1522 KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc)); 1523 bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 1524 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1525 bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap); 1526 m_freem(txq->txq_mbuf); 1527 txq->txq_mbuf = NULL; 1528 1529 if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT)) 1530 ifp->if_collisions++; 1531 if (txstat & RL_TDESC_STAT_TXERRSUM) 1532 ifp->if_oerrors++; 1533 else 1534 ifp->if_opackets++; 1535 } 1536 1537 sc->rl_ldata.rl_txq_considx = idx; 1538 1539 if (sc->rl_ldata.rl_tx_free > RL_NTXDESC_RSVD) 1540 ifp->if_flags &= ~IFF_OACTIVE; 1541 1542 /* 1543 * Some chips will ignore a second TX request issued while an 1544 * existing transmission is in progress. If the transmitter goes 1545 * idle but there are still packets waiting to be sent, we need 1546 * to restart the channel here to flush them out. This only 1547 * seems to be required with the PCIe devices. 
1548 */ 1549 if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc)) 1550 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1551 else 1552 ifp->if_timer = 0; 1553 1554 return (tx); 1555 } 1556 1557 void 1558 re_tick(void *xsc) 1559 { 1560 struct rl_softc *sc = xsc; 1561 struct mii_data *mii; 1562 struct ifnet *ifp; 1563 int s; 1564 1565 ifp = &sc->sc_arpcom.ac_if; 1566 mii = &sc->sc_mii; 1567 1568 s = splnet(); 1569 1570 mii_tick(mii); 1571 if (sc->rl_flags & RL_FLAG_LINK) { 1572 if (!(mii->mii_media_status & IFM_ACTIVE)) 1573 sc->rl_flags &= ~RL_FLAG_LINK; 1574 } else { 1575 if (mii->mii_media_status & IFM_ACTIVE && 1576 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1577 sc->rl_flags |= RL_FLAG_LINK; 1578 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1579 re_start(ifp); 1580 } 1581 } 1582 splx(s); 1583 1584 timeout_add_sec(&sc->timer_handle, 1); 1585 } 1586 1587 int 1588 re_intr(void *arg) 1589 { 1590 struct rl_softc *sc = arg; 1591 struct ifnet *ifp; 1592 u_int16_t status; 1593 int claimed = 0, rx, tx; 1594 1595 ifp = &sc->sc_arpcom.ac_if; 1596 1597 if (!(ifp->if_flags & IFF_RUNNING)) 1598 return (0); 1599 1600 rx = tx = 0; 1601 for (;;) { 1602 1603 status = CSR_READ_2(sc, RL_ISR); 1604 /* If the card has gone away the read returns 0xffff. 
*/ 1605 if (status == 0xffff) 1606 break; 1607 if (status) 1608 CSR_WRITE_2(sc, RL_ISR, status); 1609 1610 if ((status & RL_INTRS_CPLUS) == 0) 1611 break; 1612 1613 if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) { 1614 rx |= re_rxeof(sc); 1615 claimed = 1; 1616 } 1617 1618 if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) { 1619 tx |= re_txeof(sc); 1620 claimed = 1; 1621 } 1622 1623 if (status & RL_ISR_SYSTEM_ERR) { 1624 re_reset(sc); 1625 re_init(ifp); 1626 claimed = 1; 1627 } 1628 1629 if (status & RL_ISR_LINKCHG) { 1630 timeout_del(&sc->timer_handle); 1631 re_tick(sc); 1632 claimed = 1; 1633 } 1634 } 1635 1636 if (sc->rl_imtype == RL_IMTYPE_SIM) { 1637 if ((sc->rl_flags & RL_FLAG_TIMERINTR)) { 1638 if ((tx | rx) == 0) { 1639 /* 1640 * Nothing needs to be processed, fallback 1641 * to use TX/RX interrupts. 1642 */ 1643 re_setup_intr(sc, 1, RL_IMTYPE_NONE); 1644 1645 /* 1646 * Recollect, mainly to avoid the possible 1647 * race introduced by changing interrupt 1648 * masks. 1649 */ 1650 re_rxeof(sc); 1651 tx = re_txeof(sc); 1652 } else 1653 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 1654 } else if (tx | rx) { 1655 /* 1656 * Assume that using simulated interrupt moderation 1657 * (hardware timer based) could reduce the interrupt 1658 * rate. 1659 */ 1660 re_setup_intr(sc, 1, RL_IMTYPE_SIM); 1661 } 1662 } 1663 1664 if (tx && !IFQ_IS_EMPTY(&ifp->if_snd)) 1665 re_start(ifp); 1666 1667 return (claimed); 1668 } 1669 1670 int 1671 re_encap(struct rl_softc *sc, struct mbuf *m, int *idx) 1672 { 1673 bus_dmamap_t map; 1674 int error, seg, nsegs, uidx, startidx, curidx, lastidx, pad; 1675 struct rl_desc *d; 1676 u_int32_t cmdstat, vlanctl = 0, csum_flags = 0; 1677 struct rl_txq *txq; 1678 1679 if (sc->rl_ldata.rl_tx_free <= RL_NTXDESC_RSVD) 1680 return (EFBIG); 1681 1682 /* 1683 * Set up checksum offload. Note: checksum offload bits must 1684 * appear in all descriptors of a multi-descriptor transmit 1685 * attempt. This is according to testing done with an 8169 1686 * chip. 
This is a requirement. 1687 */ 1688 1689 /* 1690 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading 1691 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/ 1692 * RL_TDESC_CMD_UDPCSUM does not take affect. 1693 */ 1694 1695 if ((m->m_pkthdr.csum_flags & 1696 (M_IPV4_CSUM_OUT|M_TCPV4_CSUM_OUT|M_UDPV4_CSUM_OUT)) != 0) { 1697 if (sc->rl_flags & RL_FLAG_DESCV2) { 1698 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 1699 if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) 1700 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 1701 if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) 1702 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 1703 } else { 1704 csum_flags |= RL_TDESC_CMD_IPCSUM; 1705 if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) 1706 csum_flags |= RL_TDESC_CMD_TCPCSUM; 1707 if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) 1708 csum_flags |= RL_TDESC_CMD_UDPCSUM; 1709 } 1710 } 1711 1712 txq = &sc->rl_ldata.rl_txq[*idx]; 1713 map = txq->txq_dmamap; 1714 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1715 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1716 if (error) { 1717 /* XXX try to defrag if EFBIG? */ 1718 printf("%s: can't map mbuf (error %d)\n", 1719 sc->sc_dev.dv_xname, error); 1720 return (error); 1721 } 1722 1723 nsegs = map->dm_nsegs; 1724 pad = 0; 1725 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 && 1726 m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN && 1727 (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) { 1728 pad = 1; 1729 nsegs++; 1730 } 1731 1732 if (nsegs > sc->rl_ldata.rl_tx_free - RL_NTXDESC_RSVD) { 1733 error = EFBIG; 1734 goto fail_unload; 1735 } 1736 1737 /* 1738 * Make sure that the caches are synchronized before we 1739 * ask the chip to start DMA for the packet data. 1740 */ 1741 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1742 BUS_DMASYNC_PREWRITE); 1743 1744 /* 1745 * Set up hardware VLAN tagging. Note: vlan tag info must 1746 * appear in all descriptors of a multi-descriptor 1747 * transmission attempt. 
1748 */ 1749 #if NVLAN > 0 1750 if (m->m_flags & M_VLANTAG) 1751 vlanctl |= swap16(m->m_pkthdr.ether_vtag) | 1752 RL_TDESC_VLANCTL_TAG; 1753 #endif 1754 1755 /* 1756 * Map the segment array into descriptors. Note that we set the 1757 * start-of-frame and end-of-frame markers for either TX or RX, but 1758 * they really only have meaning in the TX case. (In the RX case, 1759 * it's the chip that tells us where packets begin and end.) 1760 * We also keep track of the end of the ring and set the 1761 * end-of-ring bits as needed, and we set the ownership bits 1762 * in all except the very first descriptor. (The caller will 1763 * set this descriptor later when it start transmission or 1764 * reception.) 1765 */ 1766 curidx = startidx = sc->rl_ldata.rl_tx_nextfree; 1767 lastidx = -1; 1768 for (seg = 0; seg < map->dm_nsegs; 1769 seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) { 1770 d = &sc->rl_ldata.rl_tx_list[curidx]; 1771 RL_TXDESCSYNC(sc, curidx, 1772 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1773 cmdstat = letoh32(d->rl_cmdstat); 1774 RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD); 1775 if (cmdstat & RL_TDESC_STAT_OWN) { 1776 printf("%s: tried to map busy TX descriptor\n", 1777 sc->sc_dev.dv_xname); 1778 for (; seg > 0; seg --) { 1779 uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) % 1780 RL_TX_DESC_CNT(sc); 1781 sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0; 1782 RL_TXDESCSYNC(sc, uidx, 1783 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1784 } 1785 error = ENOBUFS; 1786 goto fail_unload; 1787 } 1788 1789 d->rl_vlanctl = htole32(vlanctl); 1790 re_set_bufaddr(d, map->dm_segs[seg].ds_addr); 1791 cmdstat = csum_flags | map->dm_segs[seg].ds_len; 1792 if (seg == 0) 1793 cmdstat |= RL_TDESC_CMD_SOF; 1794 else 1795 cmdstat |= RL_TDESC_CMD_OWN; 1796 if (curidx == (RL_TX_DESC_CNT(sc) - 1)) 1797 cmdstat |= RL_TDESC_CMD_EOR; 1798 if (seg == nsegs - 1) { 1799 cmdstat |= RL_TDESC_CMD_EOF; 1800 lastidx = curidx; 1801 } 1802 d->rl_cmdstat = htole32(cmdstat); 1803 RL_TXDESCSYNC(sc, curidx, 
1804 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1805 } 1806 if (pad) { 1807 bus_addr_t paddaddr; 1808 1809 d = &sc->rl_ldata.rl_tx_list[curidx]; 1810 d->rl_vlanctl = htole32(vlanctl); 1811 paddaddr = RL_TXPADDADDR(sc); 1812 re_set_bufaddr(d, paddaddr); 1813 cmdstat = csum_flags | 1814 RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF | 1815 (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len); 1816 if (curidx == (RL_TX_DESC_CNT(sc) - 1)) 1817 cmdstat |= RL_TDESC_CMD_EOR; 1818 d->rl_cmdstat = htole32(cmdstat); 1819 RL_TXDESCSYNC(sc, curidx, 1820 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1821 lastidx = curidx; 1822 curidx = RL_NEXT_TX_DESC(sc, curidx); 1823 } 1824 KASSERT(lastidx != -1); 1825 1826 /* Transfer ownership of packet to the chip. */ 1827 1828 sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |= 1829 htole32(RL_TDESC_CMD_OWN); 1830 RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1831 1832 /* update info of TX queue and descriptors */ 1833 txq->txq_mbuf = m; 1834 txq->txq_descidx = lastidx; 1835 txq->txq_nsegs = nsegs; 1836 1837 sc->rl_ldata.rl_tx_free -= nsegs; 1838 sc->rl_ldata.rl_tx_nextfree = curidx; 1839 1840 *idx = RL_NEXT_TXQ(sc, *idx); 1841 1842 return (0); 1843 1844 fail_unload: 1845 bus_dmamap_unload(sc->sc_dmat, map); 1846 1847 return (error); 1848 } 1849 1850 /* 1851 * Main transmit routine for C+ and gigE NICs. 
1852 */ 1853 1854 void 1855 re_start(struct ifnet *ifp) 1856 { 1857 struct rl_softc *sc; 1858 int idx, queued = 0; 1859 1860 sc = ifp->if_softc; 1861 1862 if (ifp->if_flags & IFF_OACTIVE) 1863 return; 1864 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1865 return; 1866 1867 idx = sc->rl_ldata.rl_txq_prodidx; 1868 for (;;) { 1869 struct mbuf *m; 1870 int error; 1871 1872 IFQ_POLL(&ifp->if_snd, m); 1873 if (m == NULL) 1874 break; 1875 1876 if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) { 1877 KASSERT(idx == sc->rl_ldata.rl_txq_considx); 1878 ifp->if_flags |= IFF_OACTIVE; 1879 break; 1880 } 1881 1882 error = re_encap(sc, m, &idx); 1883 if (error == EFBIG && 1884 sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) { 1885 IFQ_DEQUEUE(&ifp->if_snd, m); 1886 m_freem(m); 1887 ifp->if_oerrors++; 1888 continue; 1889 } 1890 if (error) { 1891 ifp->if_flags |= IFF_OACTIVE; 1892 break; 1893 } 1894 1895 IFQ_DEQUEUE(&ifp->if_snd, m); 1896 queued++; 1897 1898 #if NBPFILTER > 0 1899 /* 1900 * If there's a BPF listener, bounce a copy of this frame 1901 * to him. 1902 */ 1903 if (ifp->if_bpf) 1904 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1905 #endif 1906 } 1907 1908 if (queued == 0) 1909 return; 1910 1911 sc->rl_ldata.rl_txq_prodidx = idx; 1912 1913 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1914 1915 /* 1916 * Set a timeout in case the chip goes out to lunch. 1917 */ 1918 ifp->if_timer = 5; 1919 } 1920 1921 int 1922 re_init(struct ifnet *ifp) 1923 { 1924 struct rl_softc *sc = ifp->if_softc; 1925 u_int32_t rxcfg = 0; 1926 u_int16_t cfg; 1927 int s; 1928 union { 1929 u_int32_t align_dummy; 1930 u_char eaddr[ETHER_ADDR_LEN]; 1931 } eaddr; 1932 1933 s = splnet(); 1934 1935 /* 1936 * Cancel pending I/O and free all RX/TX buffers. 1937 */ 1938 re_stop(ifp, 0); 1939 1940 /* 1941 * Enable C+ RX and TX mode, as well as RX checksum offload. 1942 * We must configure the C+ register before all others. 
1943 */ 1944 cfg = RL_CPLUSCMD_PCI_MRW; 1945 if (ifp->if_capabilities & IFCAP_CSUM_IPv4) 1946 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 1947 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1948 cfg |= RL_CPLUSCMD_VLANSTRIP; 1949 if (sc->rl_flags & RL_FLAG_MACSTAT) { 1950 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1951 /* XXX magic. */ 1952 cfg |= 0x0001; 1953 } else { 1954 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 1955 } 1956 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1957 1958 /* 1959 * Init our MAC address. Even though the chipset 1960 * documentation doesn't mention it, we need to enter "Config 1961 * register write enable" mode to modify the ID registers. 1962 */ 1963 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 1964 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1965 CSR_WRITE_4(sc, RL_IDR4, 1966 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 1967 CSR_WRITE_4(sc, RL_IDR0, 1968 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 1969 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1970 1971 /* 1972 * For C+ mode, initialize the RX descriptors and mbufs. 1973 */ 1974 re_rx_list_init(sc); 1975 re_tx_list_init(sc); 1976 1977 /* 1978 * Load the addresses of the RX and TX lists into the chip. 1979 */ 1980 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 1981 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1982 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 1983 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 1984 1985 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 1986 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1987 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 1988 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 1989 1990 /* 1991 * Enable transmit and receive. 1992 */ 1993 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1994 1995 /* 1996 * Set the initial TX and RX configuration. 
1997 */ 1998 if (sc->rl_testmode) { 1999 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2000 CSR_WRITE_4(sc, RL_TXCFG, 2001 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 2002 else 2003 CSR_WRITE_4(sc, RL_TXCFG, 2004 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 2005 } else 2006 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2007 2008 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 2009 2010 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2011 2012 /* Set the individual bit to receive frames for this host only. */ 2013 rxcfg = CSR_READ_4(sc, RL_RXCFG); 2014 rxcfg |= RL_RXCFG_RX_INDIV; 2015 2016 /* 2017 * Set capture broadcast bit to capture broadcast frames. 2018 */ 2019 if (ifp->if_flags & IFF_BROADCAST) 2020 rxcfg |= RL_RXCFG_RX_BROAD; 2021 else 2022 rxcfg &= ~RL_RXCFG_RX_BROAD; 2023 2024 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2025 2026 /* Program promiscuous mode and multicast filters. */ 2027 re_iff(sc); 2028 2029 /* 2030 * Enable interrupts. 2031 */ 2032 if (sc->rl_testmode) 2033 CSR_WRITE_2(sc, RL_IMR, 0); 2034 else 2035 re_setup_intr(sc, 1, sc->rl_imtype); 2036 CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype); 2037 2038 /* Start RX/TX process. */ 2039 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2040 #ifdef notdef 2041 /* Enable receiver and transmitter. */ 2042 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2043 #endif 2044 2045 /* 2046 * For 8169 gigE NICs, set the max allowed RX packet 2047 * size so we can receive jumbo frames. 2048 */ 2049 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 2050 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 2051 2052 if (sc->rl_testmode) 2053 return (0); 2054 2055 mii_mediachg(&sc->sc_mii); 2056 2057 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 2058 2059 ifp->if_flags |= IFF_RUNNING; 2060 ifp->if_flags &= ~IFF_OACTIVE; 2061 2062 splx(s); 2063 2064 sc->rl_flags &= ~RL_FLAG_LINK; 2065 2066 timeout_add_sec(&sc->timer_handle, 1); 2067 2068 return (0); 2069 } 2070 2071 /* 2072 * Set media options. 
2073 */ 2074 int 2075 re_ifmedia_upd(struct ifnet *ifp) 2076 { 2077 struct rl_softc *sc; 2078 2079 sc = ifp->if_softc; 2080 2081 return (mii_mediachg(&sc->sc_mii)); 2082 } 2083 2084 /* 2085 * Report current media status. 2086 */ 2087 void 2088 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2089 { 2090 struct rl_softc *sc; 2091 2092 sc = ifp->if_softc; 2093 2094 mii_pollstat(&sc->sc_mii); 2095 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2096 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2097 } 2098 2099 int 2100 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2101 { 2102 struct rl_softc *sc = ifp->if_softc; 2103 struct ifreq *ifr = (struct ifreq *) data; 2104 struct ifaddr *ifa = (struct ifaddr *)data; 2105 int s, error = 0; 2106 2107 s = splnet(); 2108 2109 switch(command) { 2110 case SIOCSIFADDR: 2111 ifp->if_flags |= IFF_UP; 2112 if (!(ifp->if_flags & IFF_RUNNING)) 2113 re_init(ifp); 2114 #ifdef INET 2115 if (ifa->ifa_addr->sa_family == AF_INET) 2116 arp_ifinit(&sc->sc_arpcom, ifa); 2117 #endif /* INET */ 2118 break; 2119 case SIOCSIFFLAGS: 2120 if (ifp->if_flags & IFF_UP) { 2121 if (ifp->if_flags & IFF_RUNNING) 2122 re_iff(sc); 2123 else 2124 re_init(ifp); 2125 } else { 2126 if (ifp->if_flags & IFF_RUNNING) 2127 re_stop(ifp, 1); 2128 } 2129 sc->if_flags = ifp->if_flags; 2130 break; 2131 case SIOCGIFMEDIA: 2132 case SIOCSIFMEDIA: 2133 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 2134 break; 2135 default: 2136 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 2137 } 2138 2139 if (error == ENETRESET) { 2140 if (ifp->if_flags & IFF_RUNNING) 2141 re_iff(sc); 2142 error = 0; 2143 } 2144 2145 splx(s); 2146 return (error); 2147 } 2148 2149 void 2150 re_watchdog(struct ifnet *ifp) 2151 { 2152 struct rl_softc *sc; 2153 int s; 2154 2155 sc = ifp->if_softc; 2156 s = splnet(); 2157 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2158 ifp->if_oerrors++; 2159 2160 re_txeof(sc); 2161 re_rxeof(sc); 2162 2163 
re_init(ifp); 2164 2165 splx(s); 2166 } 2167 2168 /* 2169 * Stop the adapter and free any mbufs allocated to the 2170 * RX and TX lists. 2171 */ 2172 void 2173 re_stop(struct ifnet *ifp, int disable) 2174 { 2175 struct rl_softc *sc; 2176 int i; 2177 2178 sc = ifp->if_softc; 2179 2180 ifp->if_timer = 0; 2181 sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR); 2182 2183 timeout_del(&sc->timer_handle); 2184 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2185 2186 mii_down(&sc->sc_mii); 2187 2188 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2189 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2190 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2191 2192 if (sc->rl_head != NULL) { 2193 m_freem(sc->rl_head); 2194 sc->rl_head = sc->rl_tail = NULL; 2195 } 2196 2197 /* Free the TX list buffers. */ 2198 for (i = 0; i < RL_TX_QLEN; i++) { 2199 if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { 2200 bus_dmamap_unload(sc->sc_dmat, 2201 sc->rl_ldata.rl_txq[i].txq_dmamap); 2202 m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); 2203 sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; 2204 } 2205 } 2206 2207 /* Free the RX list buffers. */ 2208 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2209 if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { 2210 bus_dmamap_unload(sc->sc_dmat, 2211 sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 2212 m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); 2213 sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; 2214 } 2215 } 2216 } 2217 2218 void 2219 re_setup_hw_im(struct rl_softc *sc) 2220 { 2221 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2222 2223 /* 2224 * Interrupt moderation 2225 * 2226 * 0xABCD 2227 * A - unknown (maybe TX related) 2228 * B - TX timer (unit: 25us) 2229 * C - unknown (maybe RX related) 2230 * D - RX timer (unit: 25us) 2231 * 2232 * 2233 * re(4)'s interrupt moderation is actually controlled by 2234 * two variables, like most other NICs (bge, bnx etc.) 
2235 * o timer 2236 * o number of packets [P] 2237 * 2238 * The logic relationship between these two variables is 2239 * similar to other NICs too: 2240 * if (timer expire || packets > [P]) 2241 * Interrupt is delivered 2242 * 2243 * Currently we only know how to set 'timer', but not 2244 * 'number of packets', which should be ~30, as far as I 2245 * tested (sink ~900Kpps, interrupt rate is 30KHz) 2246 */ 2247 CSR_WRITE_2(sc, RL_IM, 2248 RL_IM_RXTIME(sc->rl_rx_time) | 2249 RL_IM_TXTIME(sc->rl_tx_time) | 2250 RL_IM_MAGIC); 2251 } 2252 2253 void 2254 re_disable_hw_im(struct rl_softc *sc) 2255 { 2256 if (sc->rl_flags & RL_FLAG_HWIM) 2257 CSR_WRITE_2(sc, RL_IM, 0); 2258 } 2259 2260 void 2261 re_setup_sim_im(struct rl_softc *sc) 2262 { 2263 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2264 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */ 2265 else { 2266 u_int32_t ticks; 2267 2268 /* 2269 * Datasheet says tick decreases at bus speed, 2270 * but it seems the clock runs a little bit 2271 * faster, so we do some compensation here. 
2272 */ 2273 ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5; 2274 CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks); 2275 } 2276 CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ 2277 sc->rl_flags |= RL_FLAG_TIMERINTR; 2278 } 2279 2280 void 2281 re_disable_sim_im(struct rl_softc *sc) 2282 { 2283 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2284 CSR_WRITE_4(sc, RL_TIMERINT, 0); 2285 else 2286 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0); 2287 sc->rl_flags &= ~RL_FLAG_TIMERINTR; 2288 } 2289 2290 void 2291 re_config_imtype(struct rl_softc *sc, int imtype) 2292 { 2293 switch (imtype) { 2294 case RL_IMTYPE_HW: 2295 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2296 /* FALL THROUGH */ 2297 case RL_IMTYPE_NONE: 2298 sc->rl_intrs = RL_INTRS_CPLUS; 2299 sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW | 2300 RL_ISR_RX_OVERRUN; 2301 sc->rl_tx_ack = RL_ISR_TX_OK; 2302 break; 2303 2304 case RL_IMTYPE_SIM: 2305 sc->rl_intrs = RL_INTRS_TIMER; 2306 sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED; 2307 sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED; 2308 break; 2309 2310 default: 2311 panic("%s: unknown imtype %d\n", 2312 sc->sc_dev.dv_xname, imtype); 2313 } 2314 } 2315 2316 void 2317 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype) 2318 { 2319 re_config_imtype(sc, imtype); 2320 2321 if (enable_intrs) 2322 CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); 2323 else 2324 CSR_WRITE_2(sc, RL_IMR, 0); 2325 2326 switch (imtype) { 2327 case RL_IMTYPE_NONE: 2328 re_disable_sim_im(sc); 2329 re_disable_hw_im(sc); 2330 break; 2331 2332 case RL_IMTYPE_HW: 2333 KASSERT(sc->rl_flags & RL_FLAG_HWIM); 2334 re_disable_sim_im(sc); 2335 re_setup_hw_im(sc); 2336 break; 2337 2338 case RL_IMTYPE_SIM: 2339 re_disable_hw_im(sc); 2340 re_setup_sim_im(sc); 2341 break; 2342 2343 default: 2344 panic("%s: unknown imtype %d\n", 2345 sc->sc_dev.dv_xname, imtype); 2346 } 2347 } 2348