1 /* $OpenBSD: re.c,v 1.139 2012/05/09 13:30:12 jsg Exp $ */ 2 /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ 3 /* 4 * Copyright (c) 1997, 1998-2003 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 /* 36 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 */ 42 43 /* 44 * This driver is designed to support RealTek's next generation of 45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 48 * 49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 50 * with the older 8139 family, however it also supports a special 51 * C+ mode of operation that provides several new performance enhancing 52 * features. These include: 53 * 54 * o Descriptor based DMA mechanism. Each descriptor represents 55 * a single packet fragment. Data buffers may be aligned on 56 * any byte boundary. 57 * 58 * o 64-bit DMA 59 * 60 * o TCP/IP checksum offload for both RX and TX 61 * 62 * o High and normal priority transmit DMA rings 63 * 64 * o VLAN tag insertion and extraction 65 * 66 * o TCP large send (segmentation offload) 67 * 68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 69 * programming API is fairly straightforward. The RX filtering, EEPROM 70 * access and PHY access is the same as it is on the older 8139 series 71 * chips. 72 * 73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 74 * same programming API and feature set as the 8139C+ with the following 75 * differences and additions: 76 * 77 * o 1000Mbps mode 78 * 79 * o Jumbo frames 80 * 81 * o GMII and TBI ports/registers for interfacing with copper 82 * or fiber PHYs 83 * 84 * o RX and TX DMA rings can have up to 1024 descriptors 85 * (the 8139C+ allows a maximum of 64) 86 * 87 * o Slight differences in register layout from the 8139C+ 88 * 89 * The TX start and timer interrupt registers are at different locations 90 * on the 8169 than they are on the 8139C+. 
Also, the status word in the 91 * RX descriptor has a slightly different bit layout. The 8169 does not 92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 93 * copper gigE PHY. 94 * 95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 96 * (the 'S' stands for 'single-chip'). These devices have the same 97 * programming API as the older 8169, but also have some vendor-specific 98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 99 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 100 * 101 * This driver takes advantage of the RX and TX checksum offload and 102 * VLAN tag insertion/extraction features. It also implements TX 103 * interrupt moderation using the timer interrupt registers, which 104 * significantly reduces TX interrupt load. There is also support 105 * for jumbo frames, however the 8169/8169S/8110S can not transmit 106 * jumbo frames larger than 7440, so the max MTU possible with this 107 * driver is 7422 bytes. 
108 */ 109 110 #include "bpfilter.h" 111 #include "vlan.h" 112 113 #include <sys/param.h> 114 #include <sys/endian.h> 115 #include <sys/systm.h> 116 #include <sys/sockio.h> 117 #include <sys/mbuf.h> 118 #include <sys/malloc.h> 119 #include <sys/kernel.h> 120 #include <sys/device.h> 121 #include <sys/timeout.h> 122 #include <sys/socket.h> 123 124 #include <net/if.h> 125 #include <net/if_dl.h> 126 #include <net/if_media.h> 127 128 #ifdef INET 129 #include <netinet/in.h> 130 #include <netinet/in_systm.h> 131 #include <netinet/in_var.h> 132 #include <netinet/ip.h> 133 #include <netinet/if_ether.h> 134 #endif 135 136 #if NVLAN > 0 137 #include <net/if_types.h> 138 #include <net/if_vlan_var.h> 139 #endif 140 141 #if NBPFILTER > 0 142 #include <net/bpf.h> 143 #endif 144 145 #include <dev/mii/mii.h> 146 #include <dev/mii/miivar.h> 147 148 #include <dev/pci/pcireg.h> 149 #include <dev/pci/pcivar.h> 150 151 #include <dev/ic/rtl81x9reg.h> 152 #include <dev/ic/revar.h> 153 154 #ifdef RE_DEBUG 155 int redebug = 0; 156 #define DPRINTF(x) do { if (redebug) printf x; } while (0) 157 #else 158 #define DPRINTF(x) 159 #endif 160 161 static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); 162 163 int re_encap(struct rl_softc *, struct mbuf *, int *); 164 165 int re_newbuf(struct rl_softc *); 166 int re_rx_list_init(struct rl_softc *); 167 void re_rx_list_fill(struct rl_softc *); 168 int re_tx_list_init(struct rl_softc *); 169 int re_rxeof(struct rl_softc *); 170 int re_txeof(struct rl_softc *); 171 void re_tick(void *); 172 void re_start(struct ifnet *); 173 int re_ioctl(struct ifnet *, u_long, caddr_t); 174 void re_watchdog(struct ifnet *); 175 int re_ifmedia_upd(struct ifnet *); 176 void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); 177 178 void re_eeprom_putbyte(struct rl_softc *, int); 179 void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); 180 void re_read_eeprom(struct rl_softc *, caddr_t, int, int); 181 182 int re_gmii_readreg(struct device *, int, 
int); 183 void re_gmii_writereg(struct device *, int, int, int); 184 185 int re_miibus_readreg(struct device *, int, int); 186 void re_miibus_writereg(struct device *, int, int, int); 187 void re_miibus_statchg(struct device *); 188 189 void re_iff(struct rl_softc *); 190 191 void re_setup_hw_im(struct rl_softc *); 192 void re_setup_sim_im(struct rl_softc *); 193 void re_disable_hw_im(struct rl_softc *); 194 void re_disable_sim_im(struct rl_softc *); 195 void re_config_imtype(struct rl_softc *, int); 196 void re_setup_intr(struct rl_softc *, int, int); 197 #ifndef SMALL_KERNEL 198 int re_wol(struct ifnet*, int); 199 #endif 200 201 #ifdef RE_DIAG 202 int re_diag(struct rl_softc *); 203 #endif 204 205 struct cfdriver re_cd = { 206 0, "re", DV_IFNET 207 }; 208 209 #define EE_SET(x) \ 210 CSR_WRITE_1(sc, RL_EECMD, \ 211 CSR_READ_1(sc, RL_EECMD) | x) 212 213 #define EE_CLR(x) \ 214 CSR_WRITE_1(sc, RL_EECMD, \ 215 CSR_READ_1(sc, RL_EECMD) & ~x) 216 217 static const struct re_revision { 218 u_int32_t re_chipid; 219 const char *re_name; 220 } re_revisions[] = { 221 { RL_HWREV_8100, "RTL8100" }, 222 { RL_HWREV_8100E_SPIN1, "RTL8100E 1" }, 223 { RL_HWREV_8100E_SPIN2, "RTL8100E 2" }, 224 { RL_HWREV_8101, "RTL8101" }, 225 { RL_HWREV_8101E, "RTL8101E" }, 226 { RL_HWREV_8102E, "RTL8102E" }, 227 { RL_HWREV_8401E, "RTL8401E" }, 228 { RL_HWREV_8402, "RTL8402" }, 229 { RL_HWREV_8411, "RTL8411" }, 230 { RL_HWREV_8102EL, "RTL8102EL" }, 231 { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" }, 232 { RL_HWREV_8103E, "RTL8103E" }, 233 { RL_HWREV_8110S, "RTL8110S" }, 234 { RL_HWREV_8139CPLUS, "RTL8139C+" }, 235 { RL_HWREV_8168_SPIN1, "RTL8168 1" }, 236 { RL_HWREV_8168_SPIN2, "RTL8168 2" }, 237 { RL_HWREV_8168_SPIN3, "RTL8168 3" }, 238 { RL_HWREV_8168C, "RTL8168C/8111C" }, 239 { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" }, 240 { RL_HWREV_8168CP, "RTL8168CP/8111CP" }, 241 { RL_HWREV_8168F, "RTL8168F/8111F" }, 242 { RL_HWREV_8105E, "RTL8105E" }, 243 { RL_HWREV_8105E_SPIN1, "RTL8105E" }, 244 { 
RL_HWREV_8168D, "RTL8168D/8111D" }, 245 { RL_HWREV_8168DP, "RTL8168DP/8111DP" }, 246 { RL_HWREV_8168E, "RTL8168E/8111E" }, 247 { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" }, 248 { RL_HWREV_8169, "RTL8169" }, 249 { RL_HWREV_8169_8110SB, "RTL8169/8110SB" }, 250 { RL_HWREV_8169_8110SBL, "RTL8169SBL" }, 251 { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" }, 252 { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" }, 253 { RL_HWREV_8169S, "RTL8169S" }, 254 255 { 0, NULL } 256 }; 257 258 259 static inline void 260 re_set_bufaddr(struct rl_desc *d, bus_addr_t addr) 261 { 262 d->rl_bufaddr_lo = htole32((uint32_t)addr); 263 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 264 d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32); 265 else 266 d->rl_bufaddr_hi = 0; 267 } 268 269 /* 270 * Send a read command and address to the EEPROM, check for ACK. 271 */ 272 void 273 re_eeprom_putbyte(struct rl_softc *sc, int addr) 274 { 275 int d, i; 276 277 d = addr | (RL_9346_READ << sc->rl_eewidth); 278 279 /* 280 * Feed in each bit and strobe the clock. 281 */ 282 283 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 284 if (d & i) 285 EE_SET(RL_EE_DATAIN); 286 else 287 EE_CLR(RL_EE_DATAIN); 288 DELAY(100); 289 EE_SET(RL_EE_CLK); 290 DELAY(150); 291 EE_CLR(RL_EE_CLK); 292 DELAY(100); 293 } 294 } 295 296 /* 297 * Read a word of data stored in the EEPROM at address 'addr.' 298 */ 299 void 300 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 301 { 302 int i; 303 u_int16_t word = 0; 304 305 /* 306 * Send address of word we want to read. 307 */ 308 re_eeprom_putbyte(sc, addr); 309 310 /* 311 * Start reading bits from EEPROM. 312 */ 313 for (i = 0x8000; i; i >>= 1) { 314 EE_SET(RL_EE_CLK); 315 DELAY(100); 316 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 317 word |= i; 318 EE_CLR(RL_EE_CLK); 319 DELAY(100); 320 } 321 322 *dest = word; 323 } 324 325 /* 326 * Read a sequence of words from the EEPROM. 
327 */ 328 void 329 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 330 { 331 int i; 332 u_int16_t word = 0, *ptr; 333 334 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 335 336 DELAY(100); 337 338 for (i = 0; i < cnt; i++) { 339 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 340 re_eeprom_getword(sc, off + i, &word); 341 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 342 ptr = (u_int16_t *)(dest + (i * 2)); 343 *ptr = word; 344 } 345 346 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 347 } 348 349 int 350 re_gmii_readreg(struct device *self, int phy, int reg) 351 { 352 struct rl_softc *sc = (struct rl_softc *)self; 353 u_int32_t rval; 354 int i; 355 356 if (phy != 7) 357 return (0); 358 359 /* Let the rgephy driver read the GMEDIASTAT register */ 360 361 if (reg == RL_GMEDIASTAT) { 362 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 363 return (rval); 364 } 365 366 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 367 368 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 369 rval = CSR_READ_4(sc, RL_PHYAR); 370 if (rval & RL_PHYAR_BUSY) 371 break; 372 DELAY(25); 373 } 374 375 if (i == RL_PHY_TIMEOUT) { 376 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); 377 return (0); 378 } 379 380 DELAY(20); 381 382 return (rval & RL_PHYAR_PHYDATA); 383 } 384 385 void 386 re_gmii_writereg(struct device *dev, int phy, int reg, int data) 387 { 388 struct rl_softc *sc = (struct rl_softc *)dev; 389 u_int32_t rval; 390 int i; 391 392 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 393 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 394 395 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 396 rval = CSR_READ_4(sc, RL_PHYAR); 397 if (!(rval & RL_PHYAR_BUSY)) 398 break; 399 DELAY(25); 400 } 401 402 if (i == RL_PHY_TIMEOUT) 403 printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname); 404 405 DELAY(20); 406 } 407 408 int 409 re_miibus_readreg(struct device *dev, int phy, int reg) 410 { 411 struct rl_softc *sc = (struct rl_softc *)dev; 412 u_int16_t rval = 0; 413 u_int16_t re8139_reg = 0; 414 int s; 415 416 s = splnet(); 417 418 if 
(sc->sc_hwrev != RL_HWREV_8139CPLUS) { 419 rval = re_gmii_readreg(dev, phy, reg); 420 splx(s); 421 return (rval); 422 } 423 424 /* Pretend the internal PHY is only at address 0 */ 425 if (phy) { 426 splx(s); 427 return (0); 428 } 429 switch(reg) { 430 case MII_BMCR: 431 re8139_reg = RL_BMCR; 432 break; 433 case MII_BMSR: 434 re8139_reg = RL_BMSR; 435 break; 436 case MII_ANAR: 437 re8139_reg = RL_ANAR; 438 break; 439 case MII_ANER: 440 re8139_reg = RL_ANER; 441 break; 442 case MII_ANLPAR: 443 re8139_reg = RL_LPAR; 444 break; 445 case MII_PHYIDR1: 446 case MII_PHYIDR2: 447 splx(s); 448 return (0); 449 /* 450 * Allow the rlphy driver to read the media status 451 * register. If we have a link partner which does not 452 * support NWAY, this is the register which will tell 453 * us the results of parallel detection. 454 */ 455 case RL_MEDIASTAT: 456 rval = CSR_READ_1(sc, RL_MEDIASTAT); 457 splx(s); 458 return (rval); 459 default: 460 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 461 splx(s); 462 return (0); 463 } 464 rval = CSR_READ_2(sc, re8139_reg); 465 if (re8139_reg == RL_BMCR) { 466 /* 8139C+ has different bit layout. */ 467 rval &= ~(BMCR_LOOP | BMCR_ISO); 468 } 469 splx(s); 470 return (rval); 471 } 472 473 void 474 re_miibus_writereg(struct device *dev, int phy, int reg, int data) 475 { 476 struct rl_softc *sc = (struct rl_softc *)dev; 477 u_int16_t re8139_reg = 0; 478 int s; 479 480 s = splnet(); 481 482 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) { 483 re_gmii_writereg(dev, phy, reg, data); 484 splx(s); 485 return; 486 } 487 488 /* Pretend the internal PHY is only at address 0 */ 489 if (phy) { 490 splx(s); 491 return; 492 } 493 switch(reg) { 494 case MII_BMCR: 495 re8139_reg = RL_BMCR; 496 /* 8139C+ has different bit layout. 
*/ 497 data &= ~(BMCR_LOOP | BMCR_ISO); 498 break; 499 case MII_BMSR: 500 re8139_reg = RL_BMSR; 501 break; 502 case MII_ANAR: 503 re8139_reg = RL_ANAR; 504 break; 505 case MII_ANER: 506 re8139_reg = RL_ANER; 507 break; 508 case MII_ANLPAR: 509 re8139_reg = RL_LPAR; 510 break; 511 case MII_PHYIDR1: 512 case MII_PHYIDR2: 513 splx(s); 514 return; 515 break; 516 default: 517 printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); 518 splx(s); 519 return; 520 } 521 CSR_WRITE_2(sc, re8139_reg, data); 522 splx(s); 523 } 524 525 void 526 re_miibus_statchg(struct device *dev) 527 { 528 } 529 530 void 531 re_iff(struct rl_softc *sc) 532 { 533 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 534 int h = 0; 535 u_int32_t hashes[2]; 536 u_int32_t rxfilt; 537 struct arpcom *ac = &sc->sc_arpcom; 538 struct ether_multi *enm; 539 struct ether_multistep step; 540 541 rxfilt = CSR_READ_4(sc, RL_RXCFG); 542 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 543 RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); 544 ifp->if_flags &= ~IFF_ALLMULTI; 545 546 /* 547 * Always accept frames destined to our station address. 548 * Always accept broadcast frames. 549 */ 550 rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 551 552 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 553 ifp->if_flags |= IFF_ALLMULTI; 554 rxfilt |= RL_RXCFG_RX_MULTI; 555 if (ifp->if_flags & IFF_PROMISC) 556 rxfilt |= RL_RXCFG_RX_ALLPHYS; 557 hashes[0] = hashes[1] = 0xFFFFFFFF; 558 } else { 559 rxfilt |= RL_RXCFG_RX_MULTI; 560 /* Program new filter. */ 561 bzero(hashes, sizeof(hashes)); 562 563 ETHER_FIRST_MULTI(step, ac, enm); 564 while (enm != NULL) { 565 h = ether_crc32_be(enm->enm_addrlo, 566 ETHER_ADDR_LEN) >> 26; 567 568 if (h < 32) 569 hashes[0] |= (1 << h); 570 else 571 hashes[1] |= (1 << (h - 32)); 572 573 ETHER_NEXT_MULTI(step, enm); 574 } 575 } 576 577 /* 578 * For some unfathomable reason, RealTek decided to reverse 579 * the order of the multicast hash registers in the PCI Express 580 * parts. 
This means we have to write the hash pattern in reverse 581 * order for those devices. 582 */ 583 if (sc->rl_flags & RL_FLAG_INVMAR) { 584 CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); 585 CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); 586 } else { 587 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 588 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 589 } 590 591 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 592 } 593 594 void 595 re_reset(struct rl_softc *sc) 596 { 597 int i; 598 599 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 600 601 for (i = 0; i < RL_TIMEOUT; i++) { 602 DELAY(10); 603 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 604 break; 605 } 606 if (i == RL_TIMEOUT) 607 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 608 609 if (sc->rl_flags & RL_FLAG_MACLDPS) 610 CSR_WRITE_1(sc, RL_LDPS, 1); 611 } 612 613 #ifdef RE_DIAG 614 615 /* 616 * The following routine is designed to test for a defect on some 617 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 618 * lines connected to the bus, however for a 32-bit only card, they 619 * should be pulled high. The result of this defect is that the 620 * NIC will not work right if you plug it into a 64-bit slot: DMA 621 * operations will be done with 64-bit transfers, which will fail 622 * because the 64-bit data lines aren't connected. 623 * 624 * There's no way to work around this (short of talking a soldering 625 * iron to the board), however we can detect it. The method we use 626 * here is to put the NIC into digital loopback mode, set the receiver 627 * to promiscuous mode, and then try to send a frame. We then compare 628 * the frame data we sent to what was received. If the data matches, 629 * then the NIC is working correctly, otherwise we know the user has 630 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 631 * slot. In the latter case, there's no way the NIC can work correctly, 632 * so we print out a message on the console and abort the device attach. 
 */

/*
 * Loopback self-test run at attach time on original 8169 boards.
 * Sends one hand-built frame through digital loopback and verifies
 * it arrives intact in the first RX descriptor.  Returns 0 on
 * success or EIO/ENOBUFS on failure.
 */
int
re_diag(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_rxsoft *rxs;
	struct rl_desc *cur_rx;
	bus_dmamap_t dmamap;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, s, error = 0, phyaddr;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	DPRINTF(("inside re_diag\n"));
	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_reset(sc);
	re_init(ifp);
	sc->rl_flags |= RL_FLAG_LINK;
	/* Internal PHY is at address 0 on the 8139C+, address 1 otherwise. */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		phyaddr = 0;
	else
		phyaddr = 1;

	/*
	 * NOTE(review): on gigE parts re_gmii_readreg() only answers PHY
	 * address 7, so these BMCR reads via phyaddr 1 return 0 and the
	 * reset poll below exits immediately — confirm this is intended.
	 */
	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg((struct device *)sc,
		    phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	/* Put the PHY into loopback so the frame comes straight back. */
	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m0, NULL, error);
	re_start(ifp);
	splx(s);
	/* Queue now owns the mbuf; clear so the done path won't free it. */
	m0 = NULL;

	DPRINTF(("re_diag: transmission started\n"));

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RL_TIMEOUT) {
		printf("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	rxs = &sc->rl_ldata.rl_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RL_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	rxstat = letoh32(cur_rx->rl_cmdstat);
	total_len = rxstat & sc->rl_rxlenmask;

	/*
	 * We sent ETHER_MIN_LEN - ETHER_CRC_LEN bytes; presumably the MAC
	 * appends the 4-byte CRC, so a good frame reads back as exactly
	 * ETHER_MIN_LEN.
	 */
	if (total_len != ETHER_MIN_LEN) {
		printf("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	DPRINTF(("re_diag: packet received\n"));

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("%s: WARNING, DMA FAILURE!\n", sc->sc_dev.dv_xname);
		printf("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		printf("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		printf("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		printf("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		printf("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		printf("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		printf("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */
	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp);
	if (m0 != NULL)
		m_freem(m0);
	DPRINTF(("leaving re_diag\n"));

	return (error);
}

#endif

#ifdef __armish__
/*
 * Thecus N2100 doesn't store the full mac address in eeprom
 * so we read the old mac address from the device before the reset
 * in hopes that the proper mac address is already there.
 */
union {
	u_int32_t eaddr_word[2];
	u_char eaddr[ETHER_ADDR_LEN];
} boot_eaddr;
int boot_eaddr_valid;
#endif /* __armish__ */
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
812 */ 813 int 814 re_attach(struct rl_softc *sc, const char *intrstr) 815 { 816 u_char eaddr[ETHER_ADDR_LEN]; 817 u_int16_t as[ETHER_ADDR_LEN / 2]; 818 struct ifnet *ifp; 819 u_int16_t re_did = 0; 820 int error = 0, i; 821 const struct re_revision *rr; 822 const char *re_name = NULL; 823 824 sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 825 826 switch (sc->sc_hwrev) { 827 case RL_HWREV_8139CPLUS: 828 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_AUTOPAD; 829 break; 830 case RL_HWREV_8100E_SPIN1: 831 case RL_HWREV_8100E_SPIN2: 832 case RL_HWREV_8101E: 833 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 834 RL_FLAG_PHYWAKE; 835 break; 836 case RL_HWREV_8103E: 837 sc->rl_flags |= RL_FLAG_MACSLEEP; 838 /* FALLTHROUGH */ 839 case RL_HWREV_8102E: 840 case RL_HWREV_8102EL: 841 case RL_HWREV_8102EL_SPIN1: 842 sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR | 843 RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 844 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 845 break; 846 case RL_HWREV_8401E: 847 case RL_HWREV_8402: 848 case RL_HWREV_8105E: 849 case RL_HWREV_8105E_SPIN1: 850 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 851 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 852 RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 853 RL_FLAG_NOJUMBO; 854 break; 855 case RL_HWREV_8168_SPIN1: 856 case RL_HWREV_8168_SPIN2: 857 case RL_HWREV_8168_SPIN3: 858 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 859 RL_FLAG_MACSTAT | RL_FLAG_HWIM; 860 break; 861 case RL_HWREV_8168C_SPIN2: 862 sc->rl_flags |= RL_FLAG_MACSLEEP; 863 /* FALLTHROUGH */ 864 case RL_HWREV_8168C: 865 case RL_HWREV_8168CP: 866 case RL_HWREV_8168DP: 867 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 868 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 869 RL_FLAG_HWIM | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 870 /* 871 * These controllers support jumbo frame but it seems 872 * that enabling it requires touching additional magic 873 * registers. 
Depending on MAC revisions some 874 * controllers need to disable checksum offload. So 875 * disable jumbo frame until I have better idea what 876 * it really requires to make it support. 877 * RTL8168C/CP : supports up to 6KB jumbo frame. 878 * RTL8111C/CP : supports up to 9KB jumbo frame. 879 */ 880 sc->rl_flags |= RL_FLAG_NOJUMBO; 881 break; 882 case RL_HWREV_8168D: 883 case RL_HWREV_8168E: 884 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 885 RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | 886 RL_FLAG_MACSTAT | RL_FLAG_HWIM | RL_FLAG_CMDSTOP | 887 RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 888 break; 889 case RL_HWREV_8168E_VL: 890 case RL_HWREV_8168F: 891 case RL_HWREV_8411: 892 sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE | 893 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 894 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_NOJUMBO; 895 break; 896 case RL_HWREV_8169_8110SB: 897 case RL_HWREV_8169_8110SBL: 898 case RL_HWREV_8169_8110SCd: 899 case RL_HWREV_8169_8110SCe: 900 sc->rl_flags |= RL_FLAG_PHYWAKE; 901 /* FALLTHROUGH */ 902 case RL_HWREV_8169: 903 case RL_HWREV_8169S: 904 case RL_HWREV_8110S: 905 sc->rl_flags |= RL_FLAG_MACLDPS; 906 break; 907 default: 908 break; 909 } 910 911 /* Reset the adapter. 
*/ 912 re_reset(sc); 913 914 sc->rl_tx_time = 5; /* 125us */ 915 sc->rl_rx_time = 2; /* 50us */ 916 if (sc->rl_flags & RL_FLAG_PCIE) 917 sc->rl_sim_time = 75; /* 75us */ 918 else 919 sc->rl_sim_time = 125; /* 125us */ 920 sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */ 921 922 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 923 sc->rl_bus_speed = 33; /* XXX */ 924 else if (sc->rl_flags & RL_FLAG_PCIE) 925 sc->rl_bus_speed = 125; 926 else { 927 u_int8_t cfg2; 928 929 cfg2 = CSR_READ_1(sc, RL_CFG2); 930 switch (cfg2 & RL_CFG2_PCI_MASK) { 931 case RL_CFG2_PCI_33MHZ: 932 sc->rl_bus_speed = 33; 933 break; 934 case RL_CFG2_PCI_66MHZ: 935 sc->rl_bus_speed = 66; 936 break; 937 default: 938 printf("%s: unknown bus speed, assume 33MHz\n", 939 sc->sc_dev.dv_xname); 940 sc->rl_bus_speed = 33; 941 break; 942 } 943 944 if (cfg2 & RL_CFG2_PCI_64BIT) 945 sc->rl_flags |= RL_FLAG_PCI64; 946 } 947 948 re_config_imtype(sc, sc->rl_imtype); 949 950 if (sc->rl_flags & RL_FLAG_PAR) { 951 /* 952 * XXX Should have a better way to extract station 953 * address from EEPROM. 954 */ 955 for (i = 0; i < ETHER_ADDR_LEN; i++) 956 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 957 } else { 958 sc->rl_eewidth = RL_9356_ADDR_LEN; 959 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 960 if (re_did != 0x8129) 961 sc->rl_eewidth = RL_9346_ADDR_LEN; 962 963 /* 964 * Get station address from the EEPROM. 965 */ 966 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 967 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 968 as[i] = letoh16(as[i]); 969 bcopy(as, eaddr, sizeof(eaddr)); 970 971 #ifdef __armish__ 972 /* 973 * On the Thecus N2100, the MAC address in the EEPROM is 974 * always 00:14:fd:10:00:00. The proper MAC address is 975 * stored in flash. Fortunately RedBoot configures the 976 * proper MAC address (for the first onboard interface) 977 * which we can read from the IDR. 
978 */ 979 if (eaddr[0] == 0x00 && eaddr[1] == 0x14 && 980 eaddr[2] == 0xfd && eaddr[3] == 0x10 && 981 eaddr[4] == 0x00 && eaddr[5] == 0x00) { 982 if (boot_eaddr_valid == 0) { 983 boot_eaddr.eaddr_word[1] = 984 letoh32(CSR_READ_4(sc, RL_IDR4)); 985 boot_eaddr.eaddr_word[0] = 986 letoh32(CSR_READ_4(sc, RL_IDR0)); 987 boot_eaddr_valid = 1; 988 } 989 990 bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr)); 991 eaddr[5] += sc->sc_dev.dv_unit; 992 } 993 #endif 994 } 995 996 /* 997 * Set RX length mask, TX poll request register 998 * and TX descriptor count. 999 */ 1000 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) { 1001 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1002 sc->rl_txstart = RL_TXSTART; 1003 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139; 1004 } else { 1005 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1006 sc->rl_txstart = RL_GTXSTART; 1007 sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169; 1008 } 1009 1010 bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 1011 1012 for (rr = re_revisions; rr->re_name != NULL; rr++) { 1013 if (rr->re_chipid == sc->sc_hwrev) 1014 re_name = rr->re_name; 1015 } 1016 1017 if (re_name == NULL) 1018 printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); 1019 else 1020 printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); 1021 1022 printf(", %s, address %s\n", intrstr, 1023 ether_sprintf(sc->sc_arpcom.ac_enaddr)); 1024 1025 if (sc->rl_ldata.rl_tx_desc_cnt > 1026 PAGE_SIZE / sizeof(struct rl_desc)) { 1027 sc->rl_ldata.rl_tx_desc_cnt = 1028 PAGE_SIZE / sizeof(struct rl_desc); 1029 } 1030 1031 /* Allocate DMA'able memory for the TX ring */ 1032 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1033 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, 1034 &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | 1035 BUS_DMA_ZERO)) != 0) { 1036 printf("%s: can't allocate tx listseg, error = %d\n", 1037 sc->sc_dev.dv_xname, error); 1038 goto fail_0; 1039 } 1040 1041 /* Load the map for the TX ring. 
*/ 1042 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, 1043 sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), 1044 (caddr_t *)&sc->rl_ldata.rl_tx_list, 1045 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 1046 printf("%s: can't map tx list, error = %d\n", 1047 sc->sc_dev.dv_xname, error); 1048 goto fail_1; 1049 } 1050 1051 if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, 1052 RL_TX_LIST_SZ(sc), 0, 0, 1053 &sc->rl_ldata.rl_tx_list_map)) != 0) { 1054 printf("%s: can't create tx list map, error = %d\n", 1055 sc->sc_dev.dv_xname, error); 1056 goto fail_2; 1057 } 1058 1059 if ((error = bus_dmamap_load(sc->sc_dmat, 1060 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1061 RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { 1062 printf("%s: can't load tx list, error = %d\n", 1063 sc->sc_dev.dv_xname, error); 1064 goto fail_3; 1065 } 1066 1067 /* Create DMA maps for TX buffers */ 1068 for (i = 0; i < RL_TX_QLEN; i++) { 1069 error = bus_dmamap_create(sc->sc_dmat, 1070 RL_JUMBO_FRAMELEN, 1071 RL_TX_DESC_CNT(sc) - RL_NTXDESC_RSVD, RL_TDESC_CMD_FRAGLEN, 1072 0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap); 1073 if (error) { 1074 printf("%s: can't create DMA map for TX\n", 1075 sc->sc_dev.dv_xname); 1076 goto fail_4; 1077 } 1078 } 1079 1080 /* Allocate DMA'able memory for the RX ring */ 1081 if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1082 RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, 1083 &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | 1084 BUS_DMA_ZERO)) != 0) { 1085 printf("%s: can't allocate rx listnseg, error = %d\n", 1086 sc->sc_dev.dv_xname, error); 1087 goto fail_4; 1088 } 1089 1090 /* Load the map for the RX ring. 
*/ 1091 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg, 1092 sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ, 1093 (caddr_t *)&sc->rl_ldata.rl_rx_list, 1094 BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { 1095 printf("%s: can't map rx list, error = %d\n", 1096 sc->sc_dev.dv_xname, error); 1097 goto fail_5; 1098 1099 } 1100 1101 if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1, 1102 RL_RX_DMAMEM_SZ, 0, 0, 1103 &sc->rl_ldata.rl_rx_list_map)) != 0) { 1104 printf("%s: can't create rx list map, error = %d\n", 1105 sc->sc_dev.dv_xname, error); 1106 goto fail_6; 1107 } 1108 1109 if ((error = bus_dmamap_load(sc->sc_dmat, 1110 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1111 RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) { 1112 printf("%s: can't load rx list, error = %d\n", 1113 sc->sc_dev.dv_xname, error); 1114 goto fail_7; 1115 } 1116 1117 /* Create DMA maps for RX buffers */ 1118 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1119 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 1120 0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); 1121 if (error) { 1122 printf("%s: can't create DMA map for RX\n", 1123 sc->sc_dev.dv_xname); 1124 goto fail_8; 1125 } 1126 } 1127 1128 ifp = &sc->sc_arpcom.ac_if; 1129 ifp->if_softc = sc; 1130 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 1131 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1132 ifp->if_ioctl = re_ioctl; 1133 ifp->if_start = re_start; 1134 ifp->if_watchdog = re_watchdog; 1135 if ((sc->rl_flags & RL_FLAG_NOJUMBO) == 0) 1136 ifp->if_hardmtu = RL_JUMBO_MTU; 1137 IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN); 1138 IFQ_SET_READY(&ifp->if_snd); 1139 1140 m_clsetwms(ifp, MCLBYTES, 2, RL_RX_DESC_CNT); 1141 1142 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 | 1143 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 1144 1145 #if NVLAN > 0 1146 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1147 #endif 1148 1149 #ifndef SMALL_KERNEL 1150 ifp->if_capabilities |= IFCAP_WOL; 1151 
ifp->if_wol = re_wol; 1152 re_wol(ifp, 0); 1153 #endif 1154 timeout_set(&sc->timer_handle, re_tick, sc); 1155 1156 /* Take PHY out of power down mode. */ 1157 if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) { 1158 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1159 if (sc->sc_hwrev == RL_HWREV_8401E) 1160 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1161 } 1162 if (sc->rl_flags & RL_FLAG_PHYWAKE) { 1163 re_gmii_writereg((struct device *)sc, 1, 0x1f, 0); 1164 re_gmii_writereg((struct device *)sc, 1, 0x0e, 0); 1165 } 1166 1167 /* Do MII setup */ 1168 sc->sc_mii.mii_ifp = ifp; 1169 sc->sc_mii.mii_readreg = re_miibus_readreg; 1170 sc->sc_mii.mii_writereg = re_miibus_writereg; 1171 sc->sc_mii.mii_statchg = re_miibus_statchg; 1172 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd, 1173 re_ifmedia_sts); 1174 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1175 MII_OFFSET_ANY, MIIF_DOPAUSE); 1176 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1177 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 1178 ifmedia_add(&sc->sc_mii.mii_media, 1179 IFM_ETHER|IFM_NONE, 0, NULL); 1180 ifmedia_set(&sc->sc_mii.mii_media, 1181 IFM_ETHER|IFM_NONE); 1182 } else 1183 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1184 1185 /* 1186 * Call MI attach routine. 1187 */ 1188 re_reset(sc); 1189 if_attach(ifp); 1190 ether_ifattach(ifp); 1191 1192 #ifdef RE_DIAG 1193 /* 1194 * Perform hardware diagnostic on the original RTL8169. 1195 * Some 32-bit cards were incorrectly wired and would 1196 * malfunction if plugged into a 64-bit slot. 1197 */ 1198 if (sc->sc_hwrev == RL_HWREV_8169) { 1199 error = re_diag(sc); 1200 if (error) { 1201 printf("%s: attach aborted due to hardware diag failure\n", 1202 sc->sc_dev.dv_xname); 1203 ether_ifdetach(ifp); 1204 goto fail_8; 1205 } 1206 } 1207 #endif 1208 1209 return (0); 1210 1211 fail_8: 1212 /* Destroy DMA maps for RX buffers. 
 */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

/*
 * Attach a fresh mbuf cluster to the RX descriptor at the current
 * producer index and hand ownership of that descriptor to the chip.
 * Returns 0 on success, or ENOBUFS if no cluster or DMA resources
 * are available, or if the descriptor is unexpectedly still owned
 * by the hardware.
 */
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN;
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Sanity check: the chip must have given this descriptor back. */
	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RL_RX_DESC_CNT - 1))
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * The descriptor is written twice: first fully initialized but
	 * with OWN clear, synced, then again with OWN set.  This way the
	 * chip can never observe a half-built descriptor that it already
	 * owns.
	 */
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);
	sc->rl_ldata.rl_rx_cnt++;

	return (0);
}

/*
 * Reset the TX ring to a pristine state: zero every descriptor, drop
 * all software queue references, push the cleared ring to memory and
 * rewind the producer/consumer bookkeeping.  Always returns 0.
 */
int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < RL_TX_QLEN; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc);
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

/*
 * Reset the RX ring bookkeeping, discard any partially reassembled
 * jumbo frame, and repopulate the ring with fresh clusters.
 * Always returns 0.
 */
int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_ldata.rl_rx_cnt = 0;
	sc->rl_head = sc->rl_tail = NULL;

	re_rx_list_fill(sc);

	return (0);
}

/*
 * Top up the RX ring: keep adding buffers until the ring is full or
 * re_newbuf() runs out of clusters/descriptors.
 */
void
re_rx_list_fill(struct rl_softc *sc)
{
	while (sc->rl_ldata.rl_rx_cnt < RL_RX_DESC_CNT) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 *
 * Returns nonzero if at least one descriptor was consumed, so the
 * interrupt handler can tell whether any RX work was done.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	/* Walk from the consumer index until the chip still owns a slot. */
	for (i = sc->rl_ldata.rl_rx_considx; sc->rl_ldata.rl_rx_cnt > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		sc->rl_ldata.rl_rx_cnt--;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * No EOF bit: this cluster is one fragment in the middle
		 * of a jumbo frame, so chain it onto rl_head/rl_tail and
		 * keep collecting.
		 */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}

/*
 * Reclaim completed TX queue entries: free mbufs and DMA maps, return
 * descriptors to the free pool and update the interface statistics.
 * Returns nonzero if at least one transmission completed.
 */
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx, tx = 0;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		/* An empty slot means we have caught up to the producer. */
		if (txq->txq_mbuf == NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_prodidx);
			break;
		}

		/*
		 * Only the last descriptor of the packet (the one with
		 * EOF) needs to be checked; its OWN bit clears when the
		 * whole packet has been sent.
		 */
		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		tx = 1;
		sc->rl_ldata.rl_tx_free += txq->txq_nsegs;
		KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->rl_ldata.rl_txq_considx = idx;

	if (sc->rl_ldata.rl_tx_free > RL_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (tx);
}

/*
 * Once-a-second timer: drive the MII state machine and track link
 * state in RL_FLAG_LINK.  When the link comes up, kick the transmit
 * queue in case packets were held back waiting for it.
 */
void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	struct ifnet	*ifp;
	int s;

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);
	if (sc->rl_flags & RL_FLAG_LINK) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_flags &= ~RL_FLAG_LINK;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_flags |= RL_FLAG_LINK;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				re_start(ifp);
		}
	}
	splx(s);

	/* Rearm: re_tick reschedules itself every second. */
	timeout_add_sec(&sc->timer_handle, 1);
}

/*
 * Interrupt handler.  Acks and dispatches RX/TX/error/link events and
 * manages the switch between plain per-packet interrupts and simulated
 * (timer-based) interrupt moderation.  Returns nonzero if the
 * interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	/* Ack exactly the bits we saw (ISR is write-1-to-clear). */
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
			claimed = 1;
		}

		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if ((sc->rl_flags & RL_FLAG_TIMERINTR)) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				tx = re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	if (tx && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	return (claimed);
}

/*
 * Map an outgoing mbuf chain into TX descriptors starting at queue
 * slot *idx.  On success *idx is advanced to the next queue slot and
 * 0 is returned; on failure the DMA map is unloaded and EFBIG/ENOBUFS
 * or the bus_dma error is returned (the mbuf is NOT freed here).
 */
int
re_encap(struct rl_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t	map;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;
	struct rl_txq	*txq;

	if (sc->rl_ldata.rl_tx_free <= RL_NTXDESC_RSVD)
		return (EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take affect.
	 */

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX try to defrag if EFBIG? */
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Short frames using legacy IP checksum offload need an extra
	 * pad descriptor (see the RL_TXPADDADDR descriptor below), so
	 * account for it before checking ring capacity.
	 */
	nsegs = map->dm_nsegs;
	pad = 0;
	if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 &&
	    m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN &&
	    (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) {
		pad = 1;
		nsegs++;
	}

	if (nsegs > sc->rl_ldata.rl_tx_free - RL_NTXDESC_RSVD) {
		error = EFBIG;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			/* Roll back the descriptors we already wrote. */
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			for (; seg > 0; seg --) {
				uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) %
				    RL_TX_DESC_CNT(sc);
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_free -= nsegs;
	sc->rl_ldata.rl_tx_nextfree = curidx;

	*idx = RL_NEXT_TXQ(sc, *idx);

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}

/*
 * Main transmit routine for C+ and gigE NICs.
1891 */ 1892 1893 void 1894 re_start(struct ifnet *ifp) 1895 { 1896 struct rl_softc *sc; 1897 int idx, queued = 0; 1898 1899 sc = ifp->if_softc; 1900 1901 if (ifp->if_flags & IFF_OACTIVE) 1902 return; 1903 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1904 return; 1905 1906 idx = sc->rl_ldata.rl_txq_prodidx; 1907 for (;;) { 1908 struct mbuf *m; 1909 int error; 1910 1911 IFQ_POLL(&ifp->if_snd, m); 1912 if (m == NULL) 1913 break; 1914 1915 if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) { 1916 KASSERT(idx == sc->rl_ldata.rl_txq_considx); 1917 ifp->if_flags |= IFF_OACTIVE; 1918 break; 1919 } 1920 1921 error = re_encap(sc, m, &idx); 1922 if (error == EFBIG && 1923 sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) { 1924 IFQ_DEQUEUE(&ifp->if_snd, m); 1925 m_freem(m); 1926 ifp->if_oerrors++; 1927 continue; 1928 } 1929 if (error) { 1930 ifp->if_flags |= IFF_OACTIVE; 1931 break; 1932 } 1933 1934 IFQ_DEQUEUE(&ifp->if_snd, m); 1935 queued++; 1936 1937 #if NBPFILTER > 0 1938 /* 1939 * If there's a BPF listener, bounce a copy of this frame 1940 * to him. 1941 */ 1942 if (ifp->if_bpf) 1943 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1944 #endif 1945 } 1946 1947 if (queued == 0) 1948 return; 1949 1950 sc->rl_ldata.rl_txq_prodidx = idx; 1951 1952 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 1953 1954 /* 1955 * Set a timeout in case the chip goes out to lunch. 1956 */ 1957 ifp->if_timer = 5; 1958 } 1959 1960 int 1961 re_init(struct ifnet *ifp) 1962 { 1963 struct rl_softc *sc = ifp->if_softc; 1964 u_int16_t cfg; 1965 int s; 1966 union { 1967 u_int32_t align_dummy; 1968 u_char eaddr[ETHER_ADDR_LEN]; 1969 } eaddr; 1970 1971 s = splnet(); 1972 1973 /* 1974 * Cancel pending I/O and free all RX/TX buffers. 1975 */ 1976 re_stop(ifp); 1977 1978 /* 1979 * Enable C+ RX and TX mode, as well as RX checksum offload. 1980 * We must configure the C+ register before all others. 
1981 */ 1982 cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW; 1983 1984 if (ifp->if_capabilities & IFCAP_CSUM_IPv4) 1985 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 1986 1987 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1988 cfg |= RL_CPLUSCMD_VLANSTRIP; 1989 1990 if (sc->rl_flags & RL_FLAG_MACSTAT) 1991 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 1992 else 1993 cfg |= RL_CPLUSCMD_RXENB; 1994 1995 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 1996 1997 /* 1998 * Init our MAC address. Even though the chipset 1999 * documentation doesn't mention it, we need to enter "Config 2000 * register write enable" mode to modify the ID registers. 2001 */ 2002 bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN); 2003 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2004 CSR_WRITE_4(sc, RL_IDR4, 2005 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 2006 CSR_WRITE_4(sc, RL_IDR0, 2007 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 2008 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2009 2010 /* 2011 * For C+ mode, initialize the RX descriptors and mbufs. 2012 */ 2013 re_rx_list_init(sc); 2014 re_tx_list_init(sc); 2015 2016 /* 2017 * Load the addresses of the RX and TX lists into the chip. 2018 */ 2019 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 2020 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 2021 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2022 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr)); 2023 2024 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 2025 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 2026 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2027 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr)); 2028 2029 /* 2030 * Enable transmit and receive. 2031 */ 2032 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2033 2034 /* 2035 * Set the initial TX and RX configuration. 
2036 */ 2037 if (sc->rl_testmode) { 2038 if (sc->sc_hwrev == RL_HWREV_8139CPLUS) 2039 CSR_WRITE_4(sc, RL_TXCFG, 2040 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 2041 else 2042 CSR_WRITE_4(sc, RL_TXCFG, 2043 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 2044 } else 2045 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2046 2047 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 2048 2049 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2050 2051 /* Program promiscuous mode and multicast filters. */ 2052 re_iff(sc); 2053 2054 /* 2055 * Enable interrupts. 2056 */ 2057 if (sc->rl_testmode) 2058 CSR_WRITE_2(sc, RL_IMR, 0); 2059 else 2060 re_setup_intr(sc, 1, sc->rl_imtype); 2061 CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype); 2062 2063 /* Start RX/TX process. */ 2064 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2065 #ifdef notdef 2066 /* Enable receiver and transmitter. */ 2067 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2068 #endif 2069 2070 /* 2071 * For 8169 gigE NICs, set the max allowed RX packet 2072 * size so we can receive jumbo frames. 2073 */ 2074 if (sc->sc_hwrev != RL_HWREV_8139CPLUS) 2075 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 2076 2077 if (sc->rl_testmode) { 2078 splx(s); 2079 return (0); 2080 } 2081 2082 mii_mediachg(&sc->sc_mii); 2083 2084 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 2085 2086 ifp->if_flags |= IFF_RUNNING; 2087 ifp->if_flags &= ~IFF_OACTIVE; 2088 2089 splx(s); 2090 2091 sc->rl_flags &= ~RL_FLAG_LINK; 2092 2093 timeout_add_sec(&sc->timer_handle, 1); 2094 2095 return (0); 2096 } 2097 2098 /* 2099 * Set media options. 2100 */ 2101 int 2102 re_ifmedia_upd(struct ifnet *ifp) 2103 { 2104 struct rl_softc *sc; 2105 2106 sc = ifp->if_softc; 2107 2108 return (mii_mediachg(&sc->sc_mii)); 2109 } 2110 2111 /* 2112 * Report current media status. 
2113 */ 2114 void 2115 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2116 { 2117 struct rl_softc *sc; 2118 2119 sc = ifp->if_softc; 2120 2121 mii_pollstat(&sc->sc_mii); 2122 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2123 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2124 } 2125 2126 int 2127 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2128 { 2129 struct rl_softc *sc = ifp->if_softc; 2130 struct ifreq *ifr = (struct ifreq *) data; 2131 struct ifaddr *ifa = (struct ifaddr *)data; 2132 int s, error = 0; 2133 2134 s = splnet(); 2135 2136 switch(command) { 2137 case SIOCSIFADDR: 2138 ifp->if_flags |= IFF_UP; 2139 if (!(ifp->if_flags & IFF_RUNNING)) 2140 re_init(ifp); 2141 #ifdef INET 2142 if (ifa->ifa_addr->sa_family == AF_INET) 2143 arp_ifinit(&sc->sc_arpcom, ifa); 2144 #endif /* INET */ 2145 break; 2146 case SIOCSIFFLAGS: 2147 if (ifp->if_flags & IFF_UP) { 2148 if (ifp->if_flags & IFF_RUNNING) 2149 error = ENETRESET; 2150 else 2151 re_init(ifp); 2152 } else { 2153 if (ifp->if_flags & IFF_RUNNING) 2154 re_stop(ifp); 2155 } 2156 break; 2157 case SIOCGIFMEDIA: 2158 case SIOCSIFMEDIA: 2159 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 2160 break; 2161 default: 2162 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); 2163 } 2164 2165 if (error == ENETRESET) { 2166 if (ifp->if_flags & IFF_RUNNING) 2167 re_iff(sc); 2168 error = 0; 2169 } 2170 2171 splx(s); 2172 return (error); 2173 } 2174 2175 void 2176 re_watchdog(struct ifnet *ifp) 2177 { 2178 struct rl_softc *sc; 2179 int s; 2180 2181 sc = ifp->if_softc; 2182 s = splnet(); 2183 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 2184 ifp->if_oerrors++; 2185 2186 re_txeof(sc); 2187 re_rxeof(sc); 2188 2189 re_init(ifp); 2190 2191 splx(s); 2192 } 2193 2194 /* 2195 * Stop the adapter and free any mbufs allocated to the 2196 * RX and TX lists. 
 */
void
re_stop(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int	i;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR);

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* Halt the DMA engines, mask all interrupts and ack any pending. */
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	/* Discard any partially reassembled jumbo frame. */
	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

/*
 * Program the hardware interrupt moderation timer (chips with
 * RL_FLAG_HWIM only).
 */
void
re_setup_hw_im(struct rl_softc *sc)
{
	KASSERT(sc->rl_flags & RL_FLAG_HWIM);

	/*
	 * Interrupt moderation
	 *
	 * 0xABCD
	 * A - unknown (maybe TX related)
	 * B - TX timer (unit: 25us)
	 * C - unknown (maybe RX related)
	 * D - RX timer (unit: 25us)
	 *
	 *
	 * re(4)'s interrupt moderation is actually controlled by
	 * two variables, like most other NICs (bge, bnx etc.)
	 * o timer
	 * o number of packets [P]
	 *
	 * The logic relationship between these two variables is
	 * similar to other NICs too:
	 * if (timer expire || packets > [P])
	 *     Interrupt is delivered
	 *
	 * Currently we only know how to set 'timer', but not
	 * 'number of packets', which should be ~30, as far as I
	 * tested (sink ~900Kpps, interrupt rate is 30KHz)
	 */
	CSR_WRITE_2(sc, RL_IM,
	    RL_IM_RXTIME(sc->rl_rx_time) |
	    RL_IM_TXTIME(sc->rl_tx_time) |
	    RL_IM_MAGIC);
}

/* Turn off hardware interrupt moderation, if the chip supports it. */
void
re_disable_hw_im(struct rl_softc *sc)
{
	if (sc->rl_flags & RL_FLAG_HWIM)
		CSR_WRITE_2(sc, RL_IM, 0);
}

/*
 * Arm the chip's free-running timer so that a timer interrupt is used
 * to simulate interrupt moderation, and flag it in RL_FLAG_TIMERINTR.
 */
void
re_setup_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
	else {
		u_int32_t ticks;

		/*
		 * Datasheet says tick decreases at bus speed,
		 * but it seems the clock runs a little bit
		 * faster, so we do some compensation here.
		 */
		ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
		CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks);
	}
	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
	sc->rl_flags |= RL_FLAG_TIMERINTR;
}

/* Disarm the simulated-moderation timer and clear RL_FLAG_TIMERINTR. */
void
re_disable_sim_im(struct rl_softc *sc)
{
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		CSR_WRITE_4(sc, RL_TIMERINT, 0);
	else
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
	sc->rl_flags &= ~RL_FLAG_TIMERINTR;
}

/*
 * Select the interrupt mask and the RX/TX ack bits that re_intr()
 * should react to for the given moderation type (softc only; no
 * hardware access here).
 */
void
re_config_imtype(struct rl_softc *sc, int imtype)
{
	switch (imtype) {
	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		/* FALLTHROUGH */
	case RL_IMTYPE_NONE:
		sc->rl_intrs = RL_INTRS_CPLUS;
		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN;
		sc->rl_tx_ack = RL_ISR_TX_OK;
		break;

	case RL_IMTYPE_SIM:
		sc->rl_intrs = RL_INTRS_TIMER;
		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

/*
 * Switch the chip to the given interrupt moderation type: program the
 * interrupt mask register (or mask everything if !enable_intrs) and
 * enable/disable the hardware and simulated moderation mechanisms
 * accordingly.
 */
void
re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
{
	re_config_imtype(sc, imtype);

	if (enable_intrs)
		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
	else
		CSR_WRITE_2(sc, RL_IMR, 0);

	switch (imtype) {
	case RL_IMTYPE_NONE:
		re_disable_sim_im(sc);
		re_disable_hw_im(sc);
		break;

	case RL_IMTYPE_HW:
		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
		re_disable_sim_im(sc);
		re_setup_hw_im(sc);
		break;

	case RL_IMTYPE_SIM:
		re_disable_hw_im(sc);
		re_setup_sim_im(sc);
		break;

	default:
		panic("%s: unknown imtype %d",
		    sc->sc_dev.dv_xname, imtype);
	}
}

#ifndef SMALL_KERNEL
int
re_wol(struct ifnet *ifp, int enable)
{
	struct rl_softc *sc = ifp->if_softc;
	int i;
	u_int8_t val;
	struct
re_wolcfg { 2383 u_int8_t enable; 2384 u_int8_t reg; 2385 u_int8_t bit; 2386 } re_wolcfg[] = { 2387 /* Always disable all wake events expect magic packet. */ 2388 { 0, RL_CFG5, RL_CFG5_WOL_UCAST }, 2389 { 0, RL_CFG5, RL_CFG5_WOL_MCAST }, 2390 { 0, RL_CFG5, RL_CFG5_WOL_BCAST }, 2391 { 1, RL_CFG3, RL_CFG3_WOL_MAGIC }, 2392 { 0, RL_CFG3, RL_CFG3_WOL_LINK } 2393 }; 2394 2395 if (enable) { 2396 if ((CSR_READ_1(sc, RL_CFG1) & RL_CFG1_PME) == 0) { 2397 printf("%s: power management is disabled, " 2398 "cannot do WOL\n", sc->sc_dev.dv_xname); 2399 return (ENOTSUP); 2400 } 2401 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_AUXPWR) == 0) 2402 printf("%s: no auxiliary power, cannot do WOL from D3 " 2403 "(power-off) state\n", sc->sc_dev.dv_xname); 2404 } 2405 2406 /* Temporarily enable write to configuration registers. */ 2407 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2408 2409 for (i = 0; i < nitems(re_wolcfg); i++) { 2410 val = CSR_READ_1(sc, re_wolcfg[i].reg); 2411 if (enable && re_wolcfg[i].enable) 2412 val |= re_wolcfg[i].bit; 2413 else 2414 val &= ~re_wolcfg[i].bit; 2415 CSR_WRITE_1(sc, re_wolcfg[i].reg, val); 2416 } 2417 2418 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2419 2420 return (0); 2421 } 2422 #endif 2423