1 /* 2 * Copyright (c) 2004 3 * Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 4 * 5 * Copyright (c) 1997, 1998-2003 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
34 * 35 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $ 36 * $DragonFly: src/sys/dev/netif/re/if_re.c,v 1.19 2005/11/28 17:13:43 dillon Exp $ 37 */ 38 39 /* 40 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver 41 * 42 * Written by Bill Paul <wpaul@windriver.com> 43 * Senior Networking Software Engineer 44 * Wind River Systems 45 */ 46 47 /* 48 * This driver is designed to support RealTek's next generation of 49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 50 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S 51 * and the RTL8110S. 52 * 53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 54 * with the older 8139 family, however it also supports a special 55 * C+ mode of operation that provides several new performance enhancing 56 * features. These include: 57 * 58 * o Descriptor based DMA mechanism. Each descriptor represents 59 * a single packet fragment. Data buffers may be aligned on 60 * any byte boundary. 61 * 62 * o 64-bit DMA 63 * 64 * o TCP/IP checksum offload for both RX and TX 65 * 66 * o High and normal priority transmit DMA rings 67 * 68 * o VLAN tag insertion and extraction 69 * 70 * o TCP large send (segmentation offload) 71 * 72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 73 * programming API is fairly straightforward. The RX filtering, EEPROM 74 * access and PHY access is the same as it is on the older 8139 series 75 * chips. 76 * 77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. 
It has almost the 78 * same programming API and feature set as the 8139C+ with the following 79 * differences and additions: 80 * 81 * o 1000Mbps mode 82 * 83 * o Jumbo frames 84 * 85 * o GMII and TBI ports/registers for interfacing with copper 86 * or fiber PHYs 87 * 88 * o RX and TX DMA rings can have up to 1024 descriptors 89 * (the 8139C+ allows a maximum of 64) 90 * 91 * o Slight differences in register layout from the 8139C+ 92 * 93 * The TX start and timer interrupt registers are at different locations 94 * on the 8169 than they are on the 8139C+. Also, the status word in the 95 * RX descriptor has a slightly different bit layout. The 8169 does not 96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 97 * copper gigE PHY. 98 * 99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 100 * (the 'S' stands for 'single-chip'). These devices have the same 101 * programming API as the older 8169, but also have some vendor-specific 102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 104 * 105 * This driver takes advantage of the RX and TX checksum offload and 106 * VLAN tag insertion/extraction features. It also implements TX 107 * interrupt moderation using the timer interrupt registers, which 108 * significantly reduces TX interrupt load. There is also support 109 * for jumbo frames, however the 8169/8169S/8110S can not transmit 110 * jumbo frames larger than 7.5K, so the max MTU possible with this 111 * driver is 7500 bytes. 
112 */ 113 114 #include "opt_polling.h" 115 116 #include <sys/param.h> 117 #include <sys/endian.h> 118 #include <sys/systm.h> 119 #include <sys/sockio.h> 120 #include <sys/mbuf.h> 121 #include <sys/malloc.h> 122 #include <sys/module.h> 123 #include <sys/kernel.h> 124 #include <sys/socket.h> 125 #include <sys/serialize.h> 126 #include <sys/thread2.h> 127 128 #include <net/if.h> 129 #include <net/ifq_var.h> 130 #include <net/if_arp.h> 131 #include <net/ethernet.h> 132 #include <net/if_dl.h> 133 #include <net/if_media.h> 134 #include <net/if_types.h> 135 #include <net/vlan/if_vlan_var.h> 136 137 #include <net/bpf.h> 138 139 #include <machine/bus_pio.h> 140 #include <machine/bus_memio.h> 141 #include <machine/bus.h> 142 #include <machine/resource.h> 143 #include <sys/bus.h> 144 #include <sys/rman.h> 145 146 #include <dev/netif/mii_layer/mii.h> 147 #include <dev/netif/mii_layer/miivar.h> 148 149 #include <bus/pci/pcireg.h> 150 #include <bus/pci/pcivar.h> 151 152 /* "controller miibus0" required. See GENERIC if you get errors here. */ 153 #include "miibus_if.h" 154 155 #include <dev/netif/re/if_rereg.h> 156 157 /* 158 * The hardware supports checksumming but, as usual, some chipsets screw it 159 * all up and produce bogus packets, so we disable it by default. 160 */ 161 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 162 #define RE_DISABLE_HWCSUM 163 164 /* 165 * Various supported device vendors/types and their names. 
 */

/*
 * Table of supported PCI vendor/device IDs plus the hardware revision
 * (read from RE_TXCFG) needed to tell the 8169 variants apart, and the
 * human-readable description used by re_probe().
 */
static struct re_type re_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8139, RE_HWREV_8139CPLUS,
		"RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8169, RE_HWREV_8169,
		"RealTek 8169 Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RE_HWREV_8169S,
		"RealTek 8169S Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RE_HWREV_8110S,
		"RealTek 8110S Single-chip Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};

/*
 * Map a hardware revision code to the driver's internal chip class
 * (RE_8139CPLUS vs. RE_8169) and a short name for attach messages.
 */
static struct re_hwrev re_hwrevs[] = {
	{ RE_HWREV_8139CPLUS, RE_8139CPLUS, "C+"},
	{ RE_HWREV_8169, RE_8169, "8169"},
	{ RE_HWREV_8169S, RE_8169, "8169S"},
	{ RE_HWREV_8110S, RE_8169, "8110S"},
	{ 0, 0, NULL }
};

/* Device framework entry points. */
static int	re_probe(device_t);
static int	re_attach(device_t);
static int	re_detach(device_t);

/* TX encapsulation. */
static int	re_encap(struct re_softc *, struct mbuf **, int *, int *);

/* DMA setup and descriptor ring management. */
static void	re_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	re_dma_map_desc(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static int	re_allocmem(device_t, struct re_softc *);
static int	re_newbuf(struct re_softc *, int, struct mbuf *);
static int	re_rx_list_init(struct re_softc *);
static int	re_tx_list_init(struct re_softc *);

/* Interrupt / data path. */
static void	re_rxeof(struct re_softc *);
static void	re_txeof(struct re_softc *);
static void	re_intr(void *);
static void	re_tick(void *);
static void	re_tick_serialized(void *);
static void	re_start(struct ifnet *);
static int	re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	re_init(void *);
static void	re_stop(struct re_softc *);
static void	re_watchdog(struct ifnet *);
static int	re_suspend(device_t);
static int	re_resume(device_t);
static void	re_shutdown(device_t);
static int	re_ifmedia_upd(struct ifnet *);
static void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* EEPROM access (bit-banged, 8139C+ family only). */
static void	re_eeprom_putbyte(struct re_softc *, int);
static void	re_eeprom_getword(struct re_softc *, int, u_int16_t *);
static void	re_read_eeprom(struct re_softc *, caddr_t, int, int, int);

/* PHY access: GMII (8169 family) and MII-compat shim (8139C+). */
static int	re_gmii_readreg(device_t, int, int);
static int	re_gmii_writereg(device_t, int, int, int);

static int	re_miibus_readreg(device_t, int, int);
static int	re_miibus_writereg(device_t, int, int, int);
static void	re_miibus_statchg(device_t);

static void	re_setmulti(struct re_softc *);
static void	re_reset(struct re_softc *);

static int	re_diag(struct re_softc *);
#ifdef DEVICE_POLLING
static void	re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct re_softc)
};

static devclass_t re_devclass;

DECLARE_DUMMY_MODULE(if_re);
DRIVER_MODULE(if_re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

/* Set/clear bits in the EEPROM command register (read-modify-write). */
#define EE_SET(x)	\
	CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) | (x))

#define EE_CLR(x)	\
	CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) & ~(x))

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */

/*
 * Clock the EEPROM read opcode plus word address out to the serial
 * EEPROM, MSB first.
 *
 * The opcode in sc->re_eecmd_read is OR'd with the address and the
 * combined value is shifted out starting at bit 0x400, i.e. 11 bits
 * total; the DELAY()s pace the bit-bang clocking.
 */
static void
re_eeprom_putbyte(struct re_softc *sc, int addr)
{
	int d, i;

	d = addr | sc->re_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i != 0; i >>= 1) {
		if (d & i)
			EE_SET(RE_EE_DATAIN);
		else
			EE_CLR(RE_EE_DATAIN);
		DELAY(100);
		EE_SET(RE_EE_CLK);
		DELAY(150);
		EE_CLR(RE_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 *
 * Enters EEPROM programming mode, clocks out the read command via
 * re_eeprom_putbyte(), then clocks 16 result bits back in, MSB first.
 * The result is stored through 'dest'.
 */
static void
re_eeprom_getword(struct re_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_PROGRAM|RE_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_PROGRAM|RE_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i != 0; i >>= 1) {
		EE_SET(RE_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RE_EECMD) & RE_EE_DATAOUT)
			word |= i;
		EE_CLR(RE_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */

/*
 * Read 'cnt' 16-bit words from the EEPROM starting at word offset
 * 'off' into the caller's buffer 'dest'.  If 'swap' is non-zero each
 * word is converted from big-endian to host order; otherwise it is
 * stored as read.
 */
static void
re_read_eeprom(struct re_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		re_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = be16toh(word);
		else
			*ptr = word;
	}
}

/*
 * Read a PHY register via the 8169's GMII management interface
 * (RE_PHYAR).  Only PHY address 1 is serviced; the special
 * RE_GMEDIASTAT pseudo-register is read directly so the rgephy
 * driver can see link status.  Returns the 16-bit register value,
 * or 0 on timeout/invalid PHY.
 */
static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct re_softc *sc = device_get_softc(dev);
	u_int32_t rval;
	int i;

	if (phy != 1)
		return(0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RE_GMEDIASTAT)
		return(CSR_READ_1(sc, RE_GMEDIASTAT));

	CSR_WRITE_4(sc, RE_PHYAR, reg << 16);
	DELAY(1000);

	/* Poll until the chip flags completion via RE_PHYAR_BUSY. */
	for (i = 0; i < RE_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RE_PHYAR);
		if (rval & RE_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RE_TIMEOUT) {
		device_printf(dev, "PHY read failed\n");
		return(0);
	}

	return(rval & RE_PHYAR_PHYDATA);
}

/*
 * Write a PHY register via the 8169's GMII management interface.
 * The write is posted with RE_PHYAR_BUSY set and we poll until the
 * chip clears it.  Always returns 0 (miibus convention); a timeout
 * is only logged.
 */
static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct re_softc *sc = device_get_softc(dev);
	uint32_t rval;
	int i;

	CSR_WRITE_4(sc, RE_PHYAR,
	    (reg << 16) | (data & RE_PHYAR_PHYDATA) | RE_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RE_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RE_PHYAR);
		if ((rval & RE_PHYAR_BUSY) == 0)
			break;
		DELAY(100);
	}

	if (i == RE_TIMEOUT)
		device_printf(dev, "PHY write failed\n");

	return(0);
}

/*
 * miibus read method.  On 8169-class chips this defers to the GMII
 * interface; on the 8139C+ the internal PHY registers are mapped into
 * normal chip register space, so the MII register number is translated
 * to the corresponding 8139 register and read directly.
 */
static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct re_softc *sc = device_get_softc(dev);
	uint16_t rval = 0;
	uint16_t re8139_reg = 0;

	if (sc->re_type == RE_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return(rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy)
		return(0);

	switch(reg) {
	case MII_BMCR:
		re8139_reg = RE_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RE_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RE_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RE_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RE_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return(0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RE_MEDIASTAT:
		return(CSR_READ_1(sc, RE_MEDIASTAT));
	default:
		device_printf(dev, "bad phy register\n");
		return(0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	return(rval);
}

/*
 * miibus write method; mirror image of re_miibus_readreg().  The
 * PHYIDR registers are read-only and silently ignored.
 */
static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct re_softc *sc= device_get_softc(dev);
	u_int16_t re8139_reg = 0;

	if (sc->re_type == RE_8169)
		return(re_gmii_writereg(dev, phy, reg, data));

	/* Pretend the internal PHY is only at address 0 */
	if (phy)
		return(0);

	switch(reg) {
	case MII_BMCR:
		re8139_reg = RE_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RE_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RE_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RE_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RE_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return(0);
	default:
		device_printf(dev, "bad phy register\n");
		return(0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return(0);
}

/*
 * miibus status-change callback; this driver has nothing to do here.
 */
static void
re_miibus_statchg(device_t dev)
{
}

/*
 * Program the 64-bit multicast hash filter.
 */

/*
 * Program the 64-bit multicast hash filter (RE_MAR0/RE_MAR4).
 *
 * With IFF_ALLMULTI or IFF_PROMISC set, the filter is opened wide
 * (all hash bits set).  Otherwise each AF_LINK multicast address is
 * hashed with the top 6 bits of the big-endian CRC32 of the MAC
 * address, and the corresponding bit is set in the 64-bit table.
 * RE_RXCFG_RX_MULTI is enabled only if at least one group is joined.
 */
static void
re_setmulti(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	uint32_t rxfilt;
	int mcnt = 0;

	rxfilt = CSR_READ_4(sc, RE_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RE_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RE_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RE_MAR0, 0);
	CSR_WRITE_4(sc, RE_MAR4, 0);

	/* now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	if (mcnt)
		rxfilt |= RE_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RE_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RE_MAR4, hashes[1]);
}

/*
 * Issue a software reset and spin (up to RE_TIMEOUT polls) until the
 * chip clears the reset bit.  The final write to register 0x82 is a
 * magic poke inherited from the vendor driver; its exact purpose is
 * undocumented here.
 */
static void
re_reset(struct re_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_RESET);

	for (i = 0; i < RE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RE_COMMAND) & RE_CMD_RESET) == 0)
			break;
	}
	if (i == RE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "reset never completed!\n");

	CSR_WRITE_1(sc, 0x82, 1);
}

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high.
 * The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of talking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct re_desc *cur_rx;
	uint16_t status;
	uint32_t rxstat;
	int total_len, i, error = 0;
	uint8_t dst[ETHER_ADDR_LEN] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	uint8_t src[ETHER_ADDR_LEN] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, MB_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return(ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->re_testmode = 1;
	/* init/stop/init: re_init() keys off re_testmode set above */
	re_init(sc);
	re_stop(sc);
	DELAY(100000);
	re_init(sc);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy (dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy (src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: ifq_handoff() ultimately calls re_start() for us.
	 */

	/* Ack any stale interrupt status before waiting. */
	CSR_WRITE_2(sc, RE_ISR, 0xFFFF);
	error = ifq_handoff(ifp, m0, NULL);
	if (error) {
		/* ifq_handoff() consumed the mbuf even on failure. */
		m0 = NULL;
		goto done;
	}
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RE_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RE_ISR);
		if ((status & (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) ==
		    (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RE_TIMEOUT) {
		if_printf(ifp, "diagnostic failed to receive packet "
		    "in loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
	    sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0]);

	/* Take ownership of the received mbuf; freed via 'done' path. */
	m0 = sc->re_ldata.re_rx_mbuf[0];
	sc->re_ldata.re_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->re_ldata.re_rx_list[0];
	total_len = RE_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->re_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		if_printf(ifp, "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) ||
	    bcmp(eh->ether_shost, &src, ETHER_ADDR_LEN) ||
	    be16toh(eh->ether_type) != ETHERTYPE_IP) {
		if_printf(ifp, "WARNING, DMA FAILURE!\n");
		if_printf(ifp, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		if_printf(ifp, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		if_printf(ifp, "You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n");
		if_printf(ifp, "Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n");
		if_printf(ifp, "Read the re(4) man page for more details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->re_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	return (error);
}

/*
 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
730 */ 731 static int 732 re_probe(device_t dev) 733 { 734 struct re_type *t; 735 struct re_softc *sc; 736 int rid; 737 uint32_t hwrev; 738 uint16_t vendor, product; 739 740 t = re_devs; 741 742 vendor = pci_get_vendor(dev); 743 product = pci_get_device(dev); 744 745 for (t = re_devs; t->re_name != NULL; t++) { 746 if (product == t->re_did && vendor == t->re_vid) 747 break; 748 } 749 750 /* 751 * Check if we found a RealTek device. 752 */ 753 if (t->re_name == NULL) 754 return(ENXIO); 755 756 /* 757 * Temporarily map the I/O space so we can read the chip ID register. 758 */ 759 sc = malloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO); 760 rid = RE_PCI_LOIO; 761 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 762 RF_ACTIVE); 763 if (sc->re_res == NULL) { 764 device_printf(dev, "couldn't map ports/memory\n"); 765 free(sc, M_TEMP); 766 return(ENXIO); 767 } 768 769 sc->re_btag = rman_get_bustag(sc->re_res); 770 sc->re_bhandle = rman_get_bushandle(sc->re_res); 771 772 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV; 773 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, sc->re_res); 774 free(sc, M_TEMP); 775 776 /* 777 * and continue matching for the specific chip... 778 */ 779 for (; t->re_name != NULL; t++) { 780 if (product == t->re_did && vendor == t->re_vid && 781 t->re_basetype == hwrev) { 782 device_set_desc(dev, t->re_name); 783 return(0); 784 } 785 } 786 return(ENXIO); 787 } 788 789 /* 790 * This routine takes the segment list provided as the result of 791 * a bus_dma_map_load() operation and assigns the addresses/lengths 792 * to RealTek DMA descriptors. This can be called either by the RX 793 * code or the TX code. In the RX case, we'll probably wind up mapping 794 * at most one segment. For the TX case, there could be any number of 795 * segments since TX packets may span multiple mbufs. In either case, 796 * if the number of segments is larger than the re_maxsegs limit 797 * specified by the caller, we abort the mapping operation. 
 * Sadly,
 * whoever designed the buffer mapping API did not provide a way to
 * return an error from here, so we have to fake it a bit.
 */

/*
 * bus_dmamap_load() callback: translate the segment array into ring
 * descriptors starting at ctx->re_idx.
 *
 * Errors are signalled back to the caller by setting ctx->re_maxsegs
 * to 0 (too many segments, or a descriptor still owned by the chip).
 * On success ctx->re_maxsegs holds the segment count actually used
 * and ctx->re_idx the index of the last descriptor filled.
 */
static void
re_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		bus_size_t mapsize, int error)
{
	struct re_dmaload_arg *ctx;
	struct re_desc *d = NULL;
	int i = 0, idx;
	uint32_t cmdstat;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->re_maxsegs) {
		ctx->re_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	idx = ctx->re_idx;
	for (;;) {
		d = &ctx->re_ring[idx];
		/* Refuse to overwrite a descriptor the chip still owns. */
		if (le32toh(d->re_cmdstat) & RE_RDESC_STAT_OWN) {
			ctx->re_maxsegs = 0;
			return;
		}
		cmdstat = segs[i].ds_len;
		d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr));
		d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RE_TDESC_CMD_SOF;
		else
			cmdstat |= RE_TDESC_CMD_OWN;
		/*
		 * NOTE(review): the end-of-ring test uses RE_RX_DESC_CNT
		 * even when this callback is servicing the TX ring; this
		 * is only correct while RE_TX_DESC_CNT == RE_RX_DESC_CNT.
		 * Verify against if_rereg.h before changing either count.
		 */
		if (idx == (RE_RX_DESC_CNT - 1))
			cmdstat |= RE_TDESC_CMD_EOR;
		d->re_cmdstat = htole32(cmdstat | ctx->re_flags);
		i++;
		if (i == nseg)
			break;
		RE_DESC_INC(idx);
	}

	d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF);
	ctx->re_maxsegs = nseg;
	ctx->re_idx = idx;
}

/*
 * Map a single buffer address.
863 */ 864 865 static void 866 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 867 { 868 uint32_t *addr; 869 870 if (error) 871 return; 872 873 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 874 addr = arg; 875 *addr = segs->ds_addr; 876 } 877 878 static int 879 re_allocmem(device_t dev, struct re_softc *sc) 880 { 881 int error, i, nseg; 882 883 /* 884 * Allocate map for RX mbufs. 885 */ 886 nseg = 32; 887 error = bus_dma_tag_create(sc->re_parent_tag, ETHER_ALIGN, 0, 888 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 889 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, 890 &sc->re_ldata.re_mtag); 891 if (error) { 892 device_printf(dev, "could not allocate dma tag\n"); 893 return(error); 894 } 895 896 /* 897 * Allocate map for TX descriptor list. 898 */ 899 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN, 900 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 901 NULL, RE_TX_LIST_SZ, 1, RE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, 902 &sc->re_ldata.re_tx_list_tag); 903 if (error) { 904 device_printf(dev, "could not allocate dma tag\n"); 905 return(error); 906 } 907 908 /* Allocate DMA'able memory for the TX ring */ 909 910 error = bus_dmamem_alloc(sc->re_ldata.re_tx_list_tag, 911 (void **)&sc->re_ldata.re_tx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO, 912 &sc->re_ldata.re_tx_list_map); 913 if (error) { 914 device_printf(dev, "could not allocate TX ring\n"); 915 return(error); 916 } 917 918 /* Load the map for the TX ring. 
*/ 919 920 error = bus_dmamap_load(sc->re_ldata.re_tx_list_tag, 921 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list, 922 RE_TX_LIST_SZ, re_dma_map_addr, 923 &sc->re_ldata.re_tx_list_addr, BUS_DMA_NOWAIT); 924 if (error) { 925 device_printf(dev, "could not get addres of TX ring\n"); 926 return(error); 927 } 928 929 /* Create DMA maps for TX buffers */ 930 931 for (i = 0; i < RE_TX_DESC_CNT; i++) { 932 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 933 &sc->re_ldata.re_tx_dmamap[i]); 934 if (error) { 935 device_printf(dev, "can't create DMA map for TX\n"); 936 return(error); 937 } 938 } 939 940 /* 941 * Allocate map for RX descriptor list. 942 */ 943 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN, 944 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 945 NULL, RE_TX_LIST_SZ, 1, RE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, 946 &sc->re_ldata.re_rx_list_tag); 947 if (error) { 948 device_printf(dev, "could not allocate dma tag\n"); 949 return(error); 950 } 951 952 /* Allocate DMA'able memory for the RX ring */ 953 954 error = bus_dmamem_alloc(sc->re_ldata.re_rx_list_tag, 955 (void **)&sc->re_ldata.re_rx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO, 956 &sc->re_ldata.re_rx_list_map); 957 if (error) { 958 device_printf(dev, "could not allocate RX ring\n"); 959 return(error); 960 } 961 962 /* Load the map for the RX ring. 
*/ 963 964 error = bus_dmamap_load(sc->re_ldata.re_rx_list_tag, 965 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list, 966 RE_TX_LIST_SZ, re_dma_map_addr, 967 &sc->re_ldata.re_rx_list_addr, BUS_DMA_NOWAIT); 968 if (error) { 969 device_printf(dev, "could not get address of RX ring\n"); 970 return(error); 971 } 972 973 /* Create DMA maps for RX buffers */ 974 975 for (i = 0; i < RE_RX_DESC_CNT; i++) { 976 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0, 977 &sc->re_ldata.re_rx_dmamap[i]); 978 if (error) { 979 device_printf(dev, "can't create DMA map for RX\n"); 980 return(ENOMEM); 981 } 982 } 983 984 return(0); 985 } 986 987 /* 988 * Attach the interface. Allocate softc structures, do ifmedia 989 * setup and ethernet/BPF attach. 990 */ 991 static int 992 re_attach(device_t dev) 993 { 994 struct re_softc *sc = device_get_softc(dev); 995 struct ifnet *ifp; 996 struct re_hwrev *hw_rev; 997 uint8_t eaddr[ETHER_ADDR_LEN]; 998 int hwrev; 999 u_int16_t re_did = 0; 1000 int error = 0, rid, i; 1001 1002 callout_init(&sc->re_timer); 1003 1004 #ifndef BURN_BRIDGES 1005 /* 1006 * Handle power management nonsense. 1007 */ 1008 1009 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1010 uint32_t membase, irq; 1011 1012 /* Save important PCI config data. */ 1013 membase = pci_read_config(dev, RE_PCI_LOMEM, 4); 1014 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1015 1016 /* Reset the power state. */ 1017 device_printf(dev, "chip is is in D%d power mode " 1018 "-- setting to D0\n", pci_get_powerstate(dev)); 1019 1020 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1021 1022 /* Restore PCI config data. */ 1023 pci_write_config(dev, RE_PCI_LOMEM, membase, 4); 1024 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1025 } 1026 #endif 1027 /* 1028 * Map control/status registers. 
1029 */ 1030 pci_enable_busmaster(dev); 1031 1032 rid = RE_PCI_LOIO; 1033 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 1034 RF_ACTIVE); 1035 1036 if (sc->re_res == NULL) { 1037 device_printf(dev, "couldn't map ports/memory\n"); 1038 error = ENXIO; 1039 goto fail; 1040 } 1041 1042 sc->re_btag = rman_get_bustag(sc->re_res); 1043 sc->re_bhandle = rman_get_bushandle(sc->re_res); 1044 1045 /* Allocate interrupt */ 1046 rid = 0; 1047 sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1048 RF_SHAREABLE | RF_ACTIVE); 1049 1050 if (sc->re_irq == NULL) { 1051 device_printf(dev, "couldn't map interrupt\n"); 1052 error = ENXIO; 1053 goto fail; 1054 } 1055 1056 /* Reset the adapter. */ 1057 re_reset(sc); 1058 1059 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV; 1060 for (hw_rev = re_hwrevs; hw_rev->re_desc != NULL; hw_rev++) { 1061 if (hw_rev->re_rev == hwrev) { 1062 sc->re_type = hw_rev->re_type; 1063 break; 1064 } 1065 } 1066 1067 if (sc->re_type == RE_8169) { 1068 /* Set RX length mask */ 1069 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN; 1070 1071 /* Force station address autoload from the EEPROM */ 1072 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_AUTOLOAD); 1073 for (i = 0; i < RE_TIMEOUT; i++) { 1074 if ((CSR_READ_1(sc, RE_EECMD) & RE_EEMODE_AUTOLOAD) == 0) 1075 break; 1076 DELAY(100); 1077 } 1078 if (i == RE_TIMEOUT) 1079 device_printf(dev, "eeprom autoload timed out\n"); 1080 1081 for (i = 0; i < ETHER_ADDR_LEN; i++) 1082 eaddr[i] = CSR_READ_1(sc, RE_IDR0 + i); 1083 } else { 1084 uint16_t as[3]; 1085 1086 /* Set RX length mask */ 1087 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN; 1088 1089 sc->re_eecmd_read = RE_EECMD_READ_6BIT; 1090 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0); 1091 if (re_did != 0x8129) 1092 sc->re_eecmd_read = RE_EECMD_READ_8BIT; 1093 1094 /* 1095 * Get station address from the EEPROM. 
1096 */ 1097 re_read_eeprom(sc, (caddr_t)as, RE_EE_EADDR, 3, 0); 1098 for (i = 0; i < 3; i++) { 1099 eaddr[(i * 2) + 0] = as[i] & 0xff; 1100 eaddr[(i * 2) + 1] = as[i] >> 8; 1101 } 1102 } 1103 1104 /* 1105 * Allocate the parent bus DMA tag appropriate for PCI. 1106 */ 1107 #define RE_NSEG_NEW 32 1108 error = bus_dma_tag_create(NULL, /* parent */ 1109 1, 0, /* alignment, boundary */ 1110 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1111 BUS_SPACE_MAXADDR, /* highaddr */ 1112 NULL, NULL, /* filter, filterarg */ 1113 MAXBSIZE, RE_NSEG_NEW, /* maxsize, nsegments */ 1114 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1115 BUS_DMA_ALLOCNOW, /* flags */ 1116 &sc->re_parent_tag); 1117 if (error) 1118 goto fail; 1119 1120 error = re_allocmem(dev, sc); 1121 1122 if (error) 1123 goto fail; 1124 1125 /* Do MII setup */ 1126 if (mii_phy_probe(dev, &sc->re_miibus, 1127 re_ifmedia_upd, re_ifmedia_sts)) { 1128 device_printf(dev, "MII without any phy!\n"); 1129 error = ENXIO; 1130 goto fail; 1131 } 1132 1133 ifp = &sc->arpcom.ac_if; 1134 ifp->if_softc = sc; 1135 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1136 ifp->if_mtu = ETHERMTU; 1137 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1138 ifp->if_ioctl = re_ioctl; 1139 ifp->if_capabilities = IFCAP_VLAN_MTU; 1140 ifp->if_start = re_start; 1141 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1142 #ifdef DEVICE_POLLING 1143 ifp->if_poll = re_poll; 1144 #endif 1145 ifp->if_watchdog = re_watchdog; 1146 ifp->if_init = re_init; 1147 if (sc->re_type == RE_8169) 1148 ifp->if_baudrate = 1000000000; 1149 else 1150 ifp->if_baudrate = 100000000; 1151 ifq_set_maxlen(&ifp->if_snd, RE_IFQ_MAXLEN); 1152 ifq_set_ready(&ifp->if_snd); 1153 #ifdef RE_DISABLE_HWCSUM 1154 ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM; 1155 ifp->if_hwassist = 0; 1156 #else 1157 ifp->if_capenable = ifp->if_capabilities; 1158 ifp->if_hwassist = RE_CSUM_FEATURES; 1159 #endif 1160 1161 /* 1162 * Call MI attach routine. 
1163 */ 1164 ether_ifattach(ifp, eaddr, NULL); 1165 1166 lwkt_serialize_enter(ifp->if_serializer); 1167 /* Perform hardware diagnostic. */ 1168 error = re_diag(sc); 1169 lwkt_serialize_exit(ifp->if_serializer); 1170 1171 if (error) { 1172 device_printf(dev, "hardware diagnostic failure\n"); 1173 ether_ifdetach(ifp); 1174 goto fail; 1175 } 1176 1177 /* Hook interrupt last to avoid having to lock softc */ 1178 error = bus_setup_intr(dev, sc->re_irq, INTR_NETSAFE, re_intr, sc, 1179 &sc->re_intrhand, ifp->if_serializer); 1180 1181 if (error) { 1182 device_printf(dev, "couldn't set up irq\n"); 1183 ether_ifdetach(ifp); 1184 goto fail; 1185 } 1186 1187 fail: 1188 if (error) 1189 re_detach(dev); 1190 1191 return (error); 1192 } 1193 1194 /* 1195 * Shutdown hardware and free up resources. This can be called any 1196 * time after the mutex has been initialized. It is called in both 1197 * the error case in attach and the normal detach case so it needs 1198 * to be careful about only freeing resources that have actually been 1199 * allocated. 
1200 */ 1201 static int 1202 re_detach(device_t dev) 1203 { 1204 struct re_softc *sc = device_get_softc(dev); 1205 struct ifnet *ifp = &sc->arpcom.ac_if; 1206 int i; 1207 1208 lwkt_serialize_enter(ifp->if_serializer); 1209 1210 /* These should only be active if attach succeeded */ 1211 if (device_is_attached(dev)) { 1212 re_stop(sc); 1213 ether_ifdetach(ifp); 1214 } 1215 if (sc->re_miibus) 1216 device_delete_child(dev, sc->re_miibus); 1217 bus_generic_detach(dev); 1218 1219 if (sc->re_intrhand) 1220 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand); 1221 1222 if (sc->re_irq) 1223 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->re_irq); 1224 if (sc->re_res) 1225 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, 1226 sc->re_res); 1227 1228 /* Unload and free the RX DMA ring memory and map */ 1229 1230 if (sc->re_ldata.re_rx_list_tag) { 1231 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag, 1232 sc->re_ldata.re_rx_list_map); 1233 bus_dmamem_free(sc->re_ldata.re_rx_list_tag, 1234 sc->re_ldata.re_rx_list, 1235 sc->re_ldata.re_rx_list_map); 1236 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag); 1237 } 1238 1239 /* Unload and free the TX DMA ring memory and map */ 1240 1241 if (sc->re_ldata.re_tx_list_tag) { 1242 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag, 1243 sc->re_ldata.re_tx_list_map); 1244 bus_dmamem_free(sc->re_ldata.re_tx_list_tag, 1245 sc->re_ldata.re_tx_list, 1246 sc->re_ldata.re_tx_list_map); 1247 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag); 1248 } 1249 1250 /* Destroy all the RX and TX buffer maps */ 1251 1252 if (sc->re_ldata.re_mtag) { 1253 for (i = 0; i < RE_TX_DESC_CNT; i++) 1254 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1255 sc->re_ldata.re_tx_dmamap[i]); 1256 for (i = 0; i < RE_RX_DESC_CNT; i++) 1257 bus_dmamap_destroy(sc->re_ldata.re_mtag, 1258 sc->re_ldata.re_rx_dmamap[i]); 1259 bus_dma_tag_destroy(sc->re_ldata.re_mtag); 1260 } 1261 1262 /* Unload and free the stats buffer and map */ 1263 1264 if (sc->re_ldata.re_stag) { 1265 
bus_dmamap_unload(sc->re_ldata.re_stag, 1266 sc->re_ldata.re_rx_list_map); 1267 bus_dmamem_free(sc->re_ldata.re_stag, 1268 sc->re_ldata.re_stats, 1269 sc->re_ldata.re_smap); 1270 bus_dma_tag_destroy(sc->re_ldata.re_stag); 1271 } 1272 1273 if (sc->re_parent_tag) 1274 bus_dma_tag_destroy(sc->re_parent_tag); 1275 1276 lwkt_serialize_exit(ifp->if_serializer); 1277 return(0); 1278 } 1279 1280 static int 1281 re_newbuf(struct re_softc *sc, int idx, struct mbuf *m) 1282 { 1283 struct re_dmaload_arg arg; 1284 struct mbuf *n = NULL; 1285 int error; 1286 1287 if (m == NULL) { 1288 n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 1289 if (n == NULL) 1290 return(ENOBUFS); 1291 m = n; 1292 } else 1293 m->m_data = m->m_ext.ext_buf; 1294 1295 /* 1296 * Initialize mbuf length fields and fixup 1297 * alignment so that the frame payload is 1298 * longword aligned. 1299 */ 1300 m->m_len = m->m_pkthdr.len = MCLBYTES; 1301 m_adj(m, ETHER_ALIGN); 1302 1303 arg.sc = sc; 1304 arg.re_idx = idx; 1305 arg.re_maxsegs = 1; 1306 arg.re_flags = 0; 1307 arg.re_ring = sc->re_ldata.re_rx_list; 1308 1309 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, 1310 sc->re_ldata.re_rx_dmamap[idx], m, re_dma_map_desc, 1311 &arg, BUS_DMA_NOWAIT); 1312 if (error || arg.re_maxsegs != 1) { 1313 if (n != NULL) 1314 m_freem(n); 1315 return (ENOMEM); 1316 } 1317 1318 sc->re_ldata.re_rx_list[idx].re_cmdstat |= htole32(RE_RDESC_CMD_OWN); 1319 sc->re_ldata.re_rx_mbuf[idx] = m; 1320 1321 bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[idx], 1322 BUS_DMASYNC_PREREAD); 1323 1324 return(0); 1325 } 1326 1327 static int 1328 re_tx_list_init(struct re_softc *sc) 1329 { 1330 bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ); 1331 bzero(&sc->re_ldata.re_tx_mbuf, RE_TX_DESC_CNT * sizeof(struct mbuf *)); 1332 1333 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1334 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE); 1335 sc->re_ldata.re_tx_prodidx = 0; 1336 sc->re_ldata.re_tx_considx = 0; 1337 sc->re_ldata.re_tx_free = 
RE_TX_DESC_CNT;

	return(0);
}

/*
 * Initialize the RX ring: zero the descriptors and mbuf pointers,
 * then attach a fresh cluster to every slot via re_newbuf().
 * Returns the re_newbuf() error on allocation failure.
 */
static int
re_rx_list_init(struct re_softc *sc)
{
	int i, error;

	bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ);
	bzero(&sc->re_ldata.re_rx_mbuf, RE_RX_DESC_CNT * sizeof(struct mbuf *));

	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		error = re_newbuf(sc, i, NULL);
		if (error)
			return(error);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
	    sc->re_ldata.re_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->re_ldata.re_rx_prodidx = 0;
	/* No partially-assembled jumbo frame pending. */
	sc->re_head = sc->re_tail = NULL;

	return(0);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static void
re_rxeof(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	struct re_desc *cur_rx;
	uint32_t rxstat, rxvlan;
	int i, total_len;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
	    sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD);

	/* Walk descriptors starting at prodidx until the chip owns one. */
	for (i = sc->re_ldata.re_rx_prodidx;
	     RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0 ; RE_DESC_INC(i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		m = sc->re_ldata.re_rx_mbuf[i];
		total_len = RE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->re_cmdstat);
		rxvlan = le32toh(cur_rx->re_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->re_ldata.re_mtag,
		    sc->re_ldata.re_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->re_ldata.re_mtag,
		    sc->re_ldata.re_rx_dmamap[i]);

		/*
		 * No EOF bit: this is a middle fragment of a jumbo
		 * frame.  Chain it onto re_head/re_tail and keep going.
		 */
		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			m->m_len = MCLBYTES - ETHER_ALIGN;
			if (sc->re_head == NULL) {
				sc->re_head = sc->re_tail = m;
			} else {
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->re_type == RE_8169)
			rxstat >>= 1;

		if (rxstat & RE_RDESC_STAT_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (re_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->re_head != NULL) {
			/*
			 * Final fragment of a jumbo frame: its length is
			 * whatever is left over after the full buffers.
			 * NOTE(review): if total_len is an exact multiple
			 * of (MCLBYTES - ETHER_ALIGN) this yields 0 —
			 * verify the hardware never produces that case.
			 */
			m->m_len = total_len % (MCLBYTES - ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
1470 */ 1471 if (m->m_len <= ETHER_CRC_LEN) { 1472 sc->re_tail->m_len -= 1473 (ETHER_CRC_LEN - m->m_len); 1474 m_freem(m); 1475 } else { 1476 m->m_len -= ETHER_CRC_LEN; 1477 sc->re_tail->m_next = m; 1478 } 1479 m = sc->re_head; 1480 sc->re_head = sc->re_tail = NULL; 1481 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1482 } else 1483 m->m_pkthdr.len = m->m_len = 1484 (total_len - ETHER_CRC_LEN); 1485 1486 ifp->if_ipackets++; 1487 m->m_pkthdr.rcvif = ifp; 1488 1489 /* Do RX checksumming if enabled */ 1490 1491 if (ifp->if_capenable & IFCAP_RXCSUM) { 1492 1493 /* Check IP header checksum */ 1494 if (rxstat & RE_RDESC_STAT_PROTOID) 1495 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1496 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0) 1497 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1498 1499 /* Check TCP/UDP checksum */ 1500 if ((RE_TCPPKT(rxstat) && 1501 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) || 1502 (RE_UDPPKT(rxstat) && 1503 (rxstat & RE_RDESC_STAT_UDPSUMBAD)) == 0) { 1504 m->m_pkthdr.csum_flags |= 1505 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1506 m->m_pkthdr.csum_data = 0xffff; 1507 } 1508 } 1509 1510 if (rxvlan & RE_RDESC_VLANCTL_TAG) { 1511 VLAN_INPUT_TAG(m, 1512 be16toh((rxvlan & RE_RDESC_VLANCTL_DATA))); 1513 } else { 1514 ifp->if_input(ifp, m); 1515 } 1516 } 1517 1518 /* Flush the RX DMA ring */ 1519 1520 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag, 1521 sc->re_ldata.re_rx_list_map, 1522 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1523 1524 sc->re_ldata.re_rx_prodidx = i; 1525 } 1526 1527 static void 1528 re_txeof(struct re_softc *sc) 1529 { 1530 struct ifnet *ifp = &sc->arpcom.ac_if; 1531 uint32_t txstat; 1532 int idx; 1533 1534 /* Invalidate the TX descriptor list */ 1535 1536 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1537 sc->re_ldata.re_tx_list_map, 1538 BUS_DMASYNC_POSTREAD); 1539 1540 for (idx = sc->re_ldata.re_tx_considx; 1541 idx != sc->re_ldata.re_tx_prodidx; RE_DESC_INC(idx)) { 1542 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat); 1543 if (txstat & 
RE_TDESC_CMD_OWN) 1544 break; 1545 1546 /* 1547 * We only stash mbufs in the last descriptor 1548 * in a fragment chain, which also happens to 1549 * be the only place where the TX status bits 1550 * are valid. 1551 */ 1552 if (txstat & RE_TDESC_CMD_EOF) { 1553 m_freem(sc->re_ldata.re_tx_mbuf[idx]); 1554 sc->re_ldata.re_tx_mbuf[idx] = NULL; 1555 bus_dmamap_unload(sc->re_ldata.re_mtag, 1556 sc->re_ldata.re_tx_dmamap[idx]); 1557 if (txstat & (RE_TDESC_STAT_EXCESSCOL| 1558 RE_TDESC_STAT_COLCNT)) 1559 ifp->if_collisions++; 1560 if (txstat & RE_TDESC_STAT_TXERRSUM) 1561 ifp->if_oerrors++; 1562 else 1563 ifp->if_opackets++; 1564 } 1565 sc->re_ldata.re_tx_free++; 1566 } 1567 1568 /* No changes made to the TX ring, so no flush needed */ 1569 if (idx != sc->re_ldata.re_tx_considx) { 1570 sc->re_ldata.re_tx_considx = idx; 1571 ifp->if_flags &= ~IFF_OACTIVE; 1572 ifp->if_timer = 0; 1573 } 1574 1575 /* 1576 * If not all descriptors have been released reaped yet, 1577 * reload the timer so that we will eventually get another 1578 * interrupt that will cause us to re-enter this routine. 1579 * This is done in case the transmitter has gone idle. 
1580 */ 1581 if (sc->re_ldata.re_tx_free != RE_TX_DESC_CNT) 1582 CSR_WRITE_4(sc, RE_TIMERCNT, 1); 1583 } 1584 1585 static void 1586 re_tick(void *xsc) 1587 { 1588 struct re_softc *sc = xsc; 1589 1590 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer); 1591 re_tick_serialized(xsc); 1592 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer); 1593 } 1594 1595 static void 1596 re_tick_serialized(void *xsc) 1597 { 1598 struct re_softc *sc = xsc; 1599 struct mii_data *mii; 1600 1601 mii = device_get_softc(sc->re_miibus); 1602 mii_tick(mii); 1603 1604 callout_reset(&sc->re_timer, hz, re_tick, sc); 1605 } 1606 1607 #ifdef DEVICE_POLLING 1608 1609 static void 1610 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1611 { 1612 struct re_softc *sc = ifp->if_softc; 1613 1614 switch(cmd) { 1615 case POLL_REGISTER: 1616 /* disable interrupts */ 1617 CSR_WRITE_2(sc, RE_IMR, 0x0000); 1618 break; 1619 case POLL_DEREGISTER: 1620 /* enable interrupts */ 1621 CSR_WRITE_2(sc, RE_IMR, RE_INTRS_CPLUS); 1622 break; 1623 default: 1624 sc->rxcycles = count; 1625 re_rxeof(sc); 1626 re_txeof(sc); 1627 1628 if (!ifq_is_empty(&ifp->if_snd)) 1629 (*ifp->if_start)(ifp); 1630 1631 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1632 uint16_t status; 1633 1634 status = CSR_READ_2(sc, RE_ISR); 1635 if (status == 0xffff) 1636 return; 1637 if (status) 1638 CSR_WRITE_2(sc, RE_ISR, status); 1639 1640 /* 1641 * XXX check behaviour on receiver stalls. 1642 */ 1643 1644 if (status & RE_ISR_SYSTEM_ERR) { 1645 re_reset(sc); 1646 re_init(sc); 1647 } 1648 } 1649 break; 1650 } 1651 } 1652 #endif /* DEVICE_POLLING */ 1653 1654 static void 1655 re_intr(void *arg) 1656 { 1657 struct re_softc *sc = arg; 1658 struct ifnet *ifp = &sc->arpcom.ac_if; 1659 uint16_t status; 1660 1661 if (sc->suspended || (ifp->if_flags & IFF_UP) == 0) 1662 return; 1663 1664 for (;;) { 1665 status = CSR_READ_2(sc, RE_ISR); 1666 /* If the card has gone away the read returns 0xffff. 
 */
		if (status == 0xffff)
			break;
		if (status)
			CSR_WRITE_2(sc, RE_ISR, status);

		if ((status & RE_INTRS_CPLUS) == 0)
			break;

		if (status & RE_ISR_RX_OK)
			re_rxeof(sc);

		if (status & RE_ISR_RX_ERR)
			re_rxeof(sc);

		if ((status & RE_ISR_TIMEOUT_EXPIRED) ||
		    (status & RE_ISR_TX_ERR) ||
		    (status & RE_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		if (status & RE_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(sc);
		}

		/* Link change: run the MII tick immediately. */
		if (status & RE_ISR_LINKCHG)
			re_tick_serialized(sc);
	}

	/* Kick the transmitter if packets queued up meanwhile. */
	if (!ifq_is_empty(&ifp->if_snd))
		(*ifp->if_start)(ifp);
}

/*
 * Map one packet into the TX ring starting at *idx.  On success *idx
 * is advanced past the chain and 0 is returned.  *called_defrag is set
 * if the mbuf was coalesced (in which case *m_head was replaced and
 * the caller owns freeing the original).  Nonzero return means the
 * packet could not be queued.
 */
static int
re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx, int *called_defrag)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m, *m_new = NULL;
	struct re_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	*called_defrag = 0;
	/* Keep some slack descriptors free. */
	if (sc->re_ldata.re_tx_free <= 4)
		return(EFBIG);

	m = *m_head;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	arg.re_flags = 0;

	if (m->m_pkthdr.csum_flags & CSUM_IP)
		arg.re_flags |= RE_TDESC_CMD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		arg.re_flags |= RE_TDESC_CMD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		arg.re_flags |= RE_TDESC_CMD_UDPCSUM;

	arg.sc = sc;
	arg.re_idx = *idx;
	arg.re_maxsegs = sc->re_ldata.re_tx_free;
	if (arg.re_maxsegs > 4)
		arg.re_maxsegs -= 4;	/* preserve the slack reserve */
	arg.re_ring = sc->re_ldata.re_tx_list;

	map = sc->re_ldata.re_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map,
	    m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		if_printf(ifp, "can't map mbuf (error %d)\n", error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.re_maxsegs == 0) {
		m_new = m_defrag_nofree(m, MB_DONTWAIT);
		if (m_new == NULL)
			return(1);
		else {
			m = m_new;
			*m_head = m;
		}

		*called_defrag = 1;
		arg.sc = sc;
		arg.re_idx = *idx;
		arg.re_maxsegs = sc->re_ldata.re_tx_free;
		arg.re_ring = sc->re_ldata.re_tx_list;

		error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map,
		    m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			if_printf(ifp, "can't map mbuf (error %d)\n", error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.  (re_dma_map_desc left arg.re_idx at the
	 * last descriptor it filled.)
	 */
	sc->re_ldata.re_tx_dmamap[*idx] =
	    sc->re_ldata.re_tx_dmamap[arg.re_idx];
	sc->re_ldata.re_tx_dmamap[arg.re_idx] = map;

	sc->re_ldata.re_tx_mbuf[arg.re_idx] = m;
	sc->re_ldata.re_tx_free -= arg.re_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m->m_pkthdr.rcvif != NULL &&
	    m->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv;
		ifv = m->m_pkthdr.rcvif->if_softc;
		if (ifv != NULL)
			sc->re_ldata.re_tx_list[*idx].re_vlanctl =
			    htole32(htobe16(ifv->ifv_tag) | RE_TDESC_VLANCTL_TAG);
	}

	/*
	 * Transfer ownership of packet to the chip.  The first
	 * descriptor's OWN bit is set last so the chip never sees a
	 * partially built chain.
	 */

	sc->re_ldata.re_tx_list[arg.re_idx].re_cmdstat |=
	    htole32(RE_TDESC_CMD_OWN);
	if (*idx != arg.re_idx)
		sc->re_ldata.re_tx_list[*idx].re_cmdstat |=
		    htole32(RE_TDESC_CMD_OWN);

	RE_DESC_INC(arg.re_idx);
	*idx = arg.re_idx;

	return(0);
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
static void
re_start(struct ifnet *ifp)
{
	struct re_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	struct mbuf *m_head2;
	int called_defrag, idx, need_trans;

	idx = sc->re_ldata.re_tx_prodidx;

	need_trans = 0;
	while (sc->re_ldata.re_tx_mbuf[idx] == NULL) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;
		m_head2 = m_head;
		if (re_encap(sc, &m_head2, &idx, &called_defrag)) {
			/*
			 * If we could not encapsulate the defragged packet,
			 * the returned m_head2 is garbage and we must dequeue
			 * and throw away the original packet.
			 */
			if (called_defrag) {
				ifq_dequeue(&ifp->if_snd, m_head);
				m_freem(m_head);
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * Clean out the packet we encapsulated. If we defragged
		 * the packet the m_head2 is the one that got encapsulated
		 * and the original must be thrown away. Otherwise m_head2
		 * *IS* the original.
1853 */ 1854 ifq_dequeue(&ifp->if_snd, m_head); 1855 if (called_defrag) 1856 m_freem(m_head); 1857 need_trans = 1; 1858 1859 /* 1860 * If there's a BPF listener, bounce a copy of this frame 1861 * to him. 1862 */ 1863 BPF_MTAP(ifp, m_head2); 1864 } 1865 1866 if (!need_trans) { 1867 return; 1868 } 1869 1870 /* Flush the TX descriptors */ 1871 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag, 1872 sc->re_ldata.re_tx_list_map, 1873 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1874 1875 sc->re_ldata.re_tx_prodidx = idx; 1876 1877 /* 1878 * RealTek put the TX poll request register in a different 1879 * location on the 8169 gigE chip. I don't know why. 1880 */ 1881 if (sc->re_type == RE_8169) 1882 CSR_WRITE_2(sc, RE_GTXSTART, RE_TXSTART_START); 1883 else 1884 CSR_WRITE_2(sc, RE_TXSTART, RE_TXSTART_START); 1885 1886 /* 1887 * Use the countdown timer for interrupt moderation. 1888 * 'TX done' interrupts are disabled. Instead, we reset the 1889 * countdown timer, which will begin counting until it hits 1890 * the value in the TIMERINT register, and then trigger an 1891 * interrupt. Each time we write to the TIMERCNT register, 1892 * the timer count is reset to 0. 1893 */ 1894 CSR_WRITE_4(sc, RE_TIMERCNT, 1); 1895 1896 /* 1897 * Set a timeout in case the chip goes out to lunch. 1898 */ 1899 ifp->if_timer = 5; 1900 } 1901 1902 static void 1903 re_init(void *xsc) 1904 { 1905 struct re_softc *sc = xsc; 1906 struct ifnet *ifp = &sc->arpcom.ac_if; 1907 struct mii_data *mii; 1908 uint32_t rxcfg = 0; 1909 1910 mii = device_get_softc(sc->re_miibus); 1911 1912 /* 1913 * Cancel pending I/O and free all RX/TX buffers. 1914 */ 1915 re_stop(sc); 1916 1917 /* 1918 * Enable C+ RX and TX mode, as well as VLAN stripping and 1919 * RX checksum offload. We must configure the C+ register 1920 * before all others. 1921 */ 1922 CSR_WRITE_2(sc, RE_CPLUS_CMD, RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB | 1923 RE_CPLUSCMD_PCI_MRW | RE_CPLUSCMD_VLANSTRIP | 1924 (ifp->if_capenable & IFCAP_RXCSUM ? 
	    RE_CPLUSCMD_RXCSUM_ENB : 0));

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_WRITECFG);
	CSR_WRITE_STREAM_4(sc, RE_IDR0,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_STREAM_4(sc, RE_IDR4,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
	CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.  In test mode the
	 * transmitter is looped back for re_diag().
	 */
	if (sc->re_testmode) {
		if (sc->re_type == RE_8169)
			CSR_WRITE_4(sc, RE_TXCFG,
			    RE_TXCFG_CONFIG | RE_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RE_TXCFG,
			    RE_TXCFG_CONFIG | RE_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RE_TXCFG, RE_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RE_RXCFG, RE_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RE_RXCFG);
	rxcfg |= RE_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RE_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RE_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RE_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RE_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	re_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, RE_IMR, 0);
	else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	/*
	 * Enable interrupts.
	 */
	if (sc->re_testmode)
		CSR_WRITE_2(sc, RE_IMR, 0);
	else
		CSR_WRITE_2(sc, RE_IMR, RE_INTRS_CPLUS);

	/* Set initial TX threshold */
	sc->re_txthresh = RE_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RE_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);
#endif
	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */

	CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_rx_list_addr));
	CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_rx_list_addr));

	CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_tx_list_addr));
	CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_tx_list_addr));

	CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, 16);

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit. This gives us TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */

	if (sc->re_type == RE_8169)
		CSR_WRITE_4(sc, RE_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RE_TIMERINT, 0x400);

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
2050 */ 2051 if (sc->re_type == RE_8169) 2052 CSR_WRITE_2(sc, RE_MAXRXPKTLEN, 16383); 2053 2054 if (sc->re_testmode) { 2055 return; 2056 } 2057 2058 mii_mediachg(mii); 2059 2060 CSR_WRITE_1(sc, RE_CFG1, RE_CFG1_DRVLOAD|RE_CFG1_FULLDUPLEX); 2061 2062 ifp->if_flags |= IFF_RUNNING; 2063 ifp->if_flags &= ~IFF_OACTIVE; 2064 2065 callout_reset(&sc->re_timer, hz, re_tick, sc); 2066 } 2067 2068 /* 2069 * Set media options. 2070 */ 2071 static int 2072 re_ifmedia_upd(struct ifnet *ifp) 2073 { 2074 struct re_softc *sc = ifp->if_softc; 2075 struct mii_data *mii; 2076 2077 mii = device_get_softc(sc->re_miibus); 2078 mii_mediachg(mii); 2079 2080 return(0); 2081 } 2082 2083 /* 2084 * Report current media status. 2085 */ 2086 static void 2087 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2088 { 2089 struct re_softc *sc = ifp->if_softc; 2090 struct mii_data *mii; 2091 2092 mii = device_get_softc(sc->re_miibus); 2093 2094 mii_pollstat(mii); 2095 ifmr->ifm_active = mii->mii_media_active; 2096 ifmr->ifm_status = mii->mii_media_status; 2097 } 2098 2099 static int 2100 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2101 { 2102 struct re_softc *sc = ifp->if_softc; 2103 struct ifreq *ifr = (struct ifreq *) data; 2104 struct mii_data *mii; 2105 int error = 0; 2106 2107 switch(command) { 2108 case SIOCSIFMTU: 2109 if (ifr->ifr_mtu > RE_JUMBO_MTU) 2110 error = EINVAL; 2111 ifp->if_mtu = ifr->ifr_mtu; 2112 break; 2113 case SIOCSIFFLAGS: 2114 if (ifp->if_flags & IFF_UP) 2115 re_init(sc); 2116 else if (ifp->if_flags & IFF_RUNNING) 2117 re_stop(sc); 2118 error = 0; 2119 break; 2120 case SIOCADDMULTI: 2121 case SIOCDELMULTI: 2122 re_setmulti(sc); 2123 error = 0; 2124 break; 2125 case SIOCGIFMEDIA: 2126 case SIOCSIFMEDIA: 2127 mii = device_get_softc(sc->re_miibus); 2128 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2129 break; 2130 case SIOCSIFCAP: 2131 ifp->if_capenable &= ~(IFCAP_HWCSUM); 2132 ifp->if_capenable |= 2133 ifr->ifr_reqcap & 
(IFCAP_HWCSUM); 2134 if (ifp->if_capenable & IFCAP_TXCSUM) 2135 ifp->if_hwassist = RE_CSUM_FEATURES; 2136 else 2137 ifp->if_hwassist = 0; 2138 if (ifp->if_flags & IFF_RUNNING) 2139 re_init(sc); 2140 break; 2141 default: 2142 error = ether_ioctl(ifp, command, data); 2143 break; 2144 } 2145 return(error); 2146 } 2147 2148 static void 2149 re_watchdog(struct ifnet *ifp) 2150 { 2151 struct re_softc *sc = ifp->if_softc; 2152 2153 if_printf(ifp, "watchdog timeout\n"); 2154 2155 ifp->if_oerrors++; 2156 2157 re_txeof(sc); 2158 re_rxeof(sc); 2159 2160 re_init(sc); 2161 2162 if (!ifq_is_empty(&ifp->if_snd)) 2163 ifp->if_start(ifp); 2164 } 2165 2166 /* 2167 * Stop the adapter and free any mbufs allocated to the 2168 * RX and TX lists. 2169 */ 2170 static void 2171 re_stop(struct re_softc *sc) 2172 { 2173 struct ifnet *ifp = &sc->arpcom.ac_if; 2174 int i; 2175 2176 ifp->if_timer = 0; 2177 callout_stop(&sc->re_timer); 2178 2179 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2180 2181 CSR_WRITE_1(sc, RE_COMMAND, 0x00); 2182 CSR_WRITE_2(sc, RE_IMR, 0x0000); 2183 2184 if (sc->re_head != NULL) { 2185 m_freem(sc->re_head); 2186 sc->re_head = sc->re_tail = NULL; 2187 } 2188 2189 /* Free the TX list buffers. */ 2190 for (i = 0; i < RE_TX_DESC_CNT; i++) { 2191 if (sc->re_ldata.re_tx_mbuf[i] != NULL) { 2192 bus_dmamap_unload(sc->re_ldata.re_mtag, 2193 sc->re_ldata.re_tx_dmamap[i]); 2194 m_freem(sc->re_ldata.re_tx_mbuf[i]); 2195 sc->re_ldata.re_tx_mbuf[i] = NULL; 2196 } 2197 } 2198 2199 /* Free the RX list buffers. */ 2200 for (i = 0; i < RE_RX_DESC_CNT; i++) { 2201 if (sc->re_ldata.re_rx_mbuf[i] != NULL) { 2202 bus_dmamap_unload(sc->re_ldata.re_mtag, 2203 sc->re_ldata.re_rx_dmamap[i]); 2204 m_freem(sc->re_ldata.re_rx_mbuf[i]); 2205 sc->re_ldata.re_rx_mbuf[i] = NULL; 2206 } 2207 } 2208 } 2209 2210 /* 2211 * Device suspend routine. Stop the interface and save some PCI 2212 * settings in case the BIOS doesn't restore them properly on 2213 * resume. 
2214 */ 2215 static int 2216 re_suspend(device_t dev) 2217 { 2218 #ifndef BURN_BRIDGES 2219 int i; 2220 #endif 2221 struct re_softc *sc = device_get_softc(dev); 2222 2223 re_stop(sc); 2224 2225 #ifndef BURN_BRIDGES 2226 for (i = 0; i < 5; i++) 2227 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2228 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2229 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2230 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2231 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2232 #endif 2233 2234 sc->suspended = 1; 2235 2236 return (0); 2237 } 2238 2239 /* 2240 * Device resume routine. Restore some PCI settings in case the BIOS 2241 * doesn't, re-enable busmastering, and restart the interface if 2242 * appropriate. 2243 */ 2244 static int 2245 re_resume(device_t dev) 2246 { 2247 struct re_softc *sc = device_get_softc(dev); 2248 struct ifnet *ifp = &sc->arpcom.ac_if; 2249 #ifndef BURN_BRIDGES 2250 int i; 2251 #endif 2252 2253 #ifndef BURN_BRIDGES 2254 /* better way to do this? */ 2255 for (i = 0; i < 5; i++) 2256 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 2257 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 2258 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 2259 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 2260 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 2261 2262 /* reenable busmastering */ 2263 pci_enable_busmaster(dev); 2264 pci_enable_io(dev, SYS_RES_IOPORT); 2265 #endif 2266 2267 /* reinitialize interface if necessary */ 2268 if (ifp->if_flags & IFF_UP) 2269 re_init(sc); 2270 2271 sc->suspended = 0; 2272 2273 return (0); 2274 } 2275 2276 /* 2277 * Stop all chip I/O so that the kernel's probe routines don't 2278 * get confused by errant DMAs when rebooting. 
2279 */ 2280 static void 2281 re_shutdown(device_t dev) 2282 { 2283 struct re_softc *sc = device_get_softc(dev); 2284 2285 re_stop(sc); 2286 } 2287