1 /* $NetBSD: rtl81x9.c,v 1.102 2017/02/20 07:43:29 ozaki-r Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp 35 */ 36 37 /* 38 * RealTek 8129/8139 PCI NIC driver 39 * 40 * Supports several extremely cheap PCI 10/100 adapters based on 41 * the RealTek chipset. Datasheets can be obtained from 42 * www.realtek.com.tw. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 51 * probably the worst PCI ethernet controller ever made, with the possible 52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 53 * DMA, but it has a terrible interface that nullifies any performance 54 * gains that bus-master DMA usually offers. 55 * 56 * For transmission, the chip offers a series of four TX descriptor 57 * registers. Each transmit frame must be in a contiguous buffer, aligned 58 * on a longword (32-bit) boundary. This means we almost always have to 59 * do mbuf copies in order to transmit a frame, except in the unlikely 60 * case where a) the packet fits into a single mbuf, and b) the packet 61 * is 32-bit aligned within the mbuf's data area. The presence of only 62 * four descriptor registers means that we can never have more than four 63 * packets queued for transmission at any one time. 64 * 65 * Reception is not much better. The driver has to allocate a single large 66 * buffer area (up to 64K in size) into which the chip will DMA received 67 * frames. 
Because we don't know where within this region received packets
 * will begin or end, we have no choice but to copy data from the buffer
 * area into mbufs in order to pass the packets up to the higher protocol
 * levels.
 *
 * It's impossible given this rotten design to really achieve decent
 * performance at 100Mbps, unless you happen to have a 400MHz PII or
 * some equally overmuscled CPU to drive it.
 *
 * On the bright side, the 8139 does have a built-in PHY, although
 * rather than using an MDIO serial interface like most other NICs, the
 * PHY registers are directly accessible through the 8139's register
 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.102 2017/02/20 07:43:29 ozaki-r Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <sys/rndsource.h>

#include <sys/bus.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/rtl81x9var.h>

static void rtk_reset(struct rtk_softc *);
static void rtk_rxeof(struct rtk_softc *);
static void rtk_txeof(struct rtk_softc *);
static void rtk_start(struct ifnet *);
static int rtk_ioctl(struct ifnet *, u_long, void *);
static int rtk_init(struct ifnet *);
static void rtk_stop(struct ifnet *, int);

static void rtk_watchdog(struct ifnet *);

static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
static void rtk_mii_sync(struct rtk_softc *);
static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);

static int rtk_phy_readreg(device_t, int, int);
static void rtk_phy_writereg(device_t, int, int, int);
static void rtk_phy_statchg(struct ifnet *);
static void rtk_tick(void *);

static int rtk_enable(struct rtk_softc *);
static void rtk_disable(struct rtk_softc *);

static void rtk_list_tx_init(struct rtk_softc *);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RTK_EECMD,			\
	    CSR_READ_1(sc, RTK_EECMD) | (x))

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RTK_EECMD,			\
	    CSR_READ_1(sc, RTK_EECMD) & ~(x))

#define EE_DELAY()	DELAY(100)

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
{
	int d, i;

	d = (RTK_EECMD_READ << addr_len) | addr;

	/*
	 * Feed in each bit and strobe the clock.
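	 * (Bit-banging note, purely descriptive: d packs the read opcode
	 * above the word address, and the loop below shifts
	 * RTK_EECMD_LEN + addr_len bits of it out, most significant bit
	 * first, presenting each bit on RTK_EE_DATAIN before raising
	 * RTK_EE_CLK. This is the standard Microwire READ sequence of
	 * start bit, opcode, then address; the serial EEPROM ignores any
	 * leading zero bits until it sees the start bit.)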
	 */
	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
		if (d & (1 << (i - 1))) {
			EE_SET(RTK_EE_DATAIN);
		} else {
			EE_CLR(RTK_EE_DATAIN);
		}
		EE_DELAY();
		EE_SET(RTK_EE_CLK);
		EE_DELAY();
		EE_CLR(RTK_EE_CLK);
		EE_DELAY();
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
uint16_t
rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
{
	uint16_t word;
	int i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
	EE_DELAY();
	EE_SET(RTK_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rtk_eeprom_putbyte(sc, addr, addr_len);

	/*
	 * Start reading bits from EEPROM.
	 */
	word = 0;
	for (i = 16; i > 0; i--) {
		EE_SET(RTK_EE_CLK);
		EE_DELAY();
		if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
			word |= 1 << (i - 1);
		EE_CLR(RTK_EE_CLK);
		EE_DELAY();
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);

	return word;
}

/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RTK_MII,			\
	    CSR_READ_1(sc, RTK_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RTK_MII,			\
	    CSR_READ_1(sc, RTK_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rtk_mii_sync(struct rtk_softc *sc)
{
	int i;

	MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RTK_MII_CLK);
		DELAY(1);
		MII_CLR(RTK_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(RTK_MII_CLK);

	for (i = cnt; i > 0; i--) {
		if (bits & (1 << (i - 1))) {
			MII_SET(RTK_MII_DATAOUT);
		} else {
			MII_CLR(RTK_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RTK_MII_CLK);
		DELAY(1);
		MII_SET(RTK_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RTK_MII_STARTDELIM;
	frame->mii_opcode = RTK_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RTK_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RTK_MII_DIR);

	rtk_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rtk_mii_send(sc, frame->mii_stdelim, 2);
	rtk_mii_send(sc, frame->mii_opcode, 2);
	rtk_mii_send(sc, frame->mii_phyaddr, 5);
	rtk_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
	DELAY(1);
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RTK_MII_DIR);

	/* Check for ack */
	MII_CLR(RTK_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
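	 *
	 * (Illustrative background: the frame bit-banged above follows the
	 * standard IEEE 802.3 clause 22 MDIO layout,
	 *
	 *	<ST=01> <OP=10 for read> <5-bit PHY addr> <5-bit reg addr>
	 *	    <turnaround> <16 data bits>
	 *
	 * and the "ack" checked above is the PHY pulling the data line low
	 * during the turnaround.)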
332 */ 333 if (ack) { 334 for (i = 0; i < 16; i++) { 335 MII_CLR(RTK_MII_CLK); 336 DELAY(1); 337 MII_SET(RTK_MII_CLK); 338 DELAY(1); 339 } 340 goto fail; 341 } 342 343 for (i = 16; i > 0; i--) { 344 MII_CLR(RTK_MII_CLK); 345 DELAY(1); 346 if (!ack) { 347 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN) 348 frame->mii_data |= 1 << (i - 1); 349 DELAY(1); 350 } 351 MII_SET(RTK_MII_CLK); 352 DELAY(1); 353 } 354 355 fail: 356 MII_CLR(RTK_MII_CLK); 357 DELAY(1); 358 MII_SET(RTK_MII_CLK); 359 DELAY(1); 360 361 splx(s); 362 363 if (ack) 364 return 1; 365 return 0; 366 } 367 368 /* 369 * Write to a PHY register through the MII. 370 */ 371 static int 372 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame) 373 { 374 int s; 375 376 s = splnet(); 377 /* 378 * Set up frame for TX. 379 */ 380 frame->mii_stdelim = RTK_MII_STARTDELIM; 381 frame->mii_opcode = RTK_MII_WRITEOP; 382 frame->mii_turnaround = RTK_MII_TURNAROUND; 383 384 /* 385 * Turn on data output. 386 */ 387 MII_SET(RTK_MII_DIR); 388 389 rtk_mii_sync(sc); 390 391 rtk_mii_send(sc, frame->mii_stdelim, 2); 392 rtk_mii_send(sc, frame->mii_opcode, 2); 393 rtk_mii_send(sc, frame->mii_phyaddr, 5); 394 rtk_mii_send(sc, frame->mii_regaddr, 5); 395 rtk_mii_send(sc, frame->mii_turnaround, 2); 396 rtk_mii_send(sc, frame->mii_data, 16); 397 398 /* Idle bit. */ 399 MII_SET(RTK_MII_CLK); 400 DELAY(1); 401 MII_CLR(RTK_MII_CLK); 402 DELAY(1); 403 404 /* 405 * Turn off xmit. 406 */ 407 MII_CLR(RTK_MII_DIR); 408 409 splx(s); 410 411 return 0; 412 } 413 414 static int 415 rtk_phy_readreg(device_t self, int phy, int reg) 416 { 417 struct rtk_softc *sc = device_private(self); 418 struct rtk_mii_frame frame; 419 int rval; 420 int rtk8139_reg; 421 422 if ((sc->sc_quirk & RTKQ_8129) == 0) { 423 if (phy != 7) 424 return 0; 425 426 switch (reg) { 427 case MII_BMCR: 428 rtk8139_reg = RTK_BMCR; 429 break; 430 case MII_BMSR: 431 rtk8139_reg = RTK_BMSR; 432 break; 433 case MII_ANAR: 434 rtk8139_reg = RTK_ANAR; 435 break; 436 case MII_ANER: 437 rtk8139_reg = RTK_ANER; 438 break; 439 case MII_ANLPAR: 440 rtk8139_reg = RTK_LPAR; 441 break; 442 default: 443 #if 0 444 printf("%s: bad phy register\n", device_xname(self)); 445 #endif 446 return 0; 447 } 448 rval = CSR_READ_2(sc, rtk8139_reg); 449 return rval; 450 } 451 452 memset(&frame, 0, sizeof(frame)); 453 454 frame.mii_phyaddr = phy; 455 frame.mii_regaddr = reg; 456 rtk_mii_readreg(sc, &frame); 457 458 return frame.mii_data; 459 } 460 461 static void 462 rtk_phy_writereg(device_t self, int phy, int reg, int data) 463 { 464 struct rtk_softc *sc = device_private(self); 465 struct rtk_mii_frame frame; 466 int rtk8139_reg; 467 468 if ((sc->sc_quirk & RTKQ_8129) == 0) { 469 if (phy != 7) 470 return; 471 472 switch (reg) { 473 case MII_BMCR: 474 rtk8139_reg = RTK_BMCR; 475 break; 476 case MII_BMSR: 477 rtk8139_reg = RTK_BMSR; 478 break; 479 case MII_ANAR: 480 rtk8139_reg = RTK_ANAR; 481 break; 482 case MII_ANER: 483 rtk8139_reg = RTK_ANER; 484 break; 485 case MII_ANLPAR: 486 rtk8139_reg = RTK_LPAR; 487 break; 488 default: 489 #if 0 490 printf("%s: bad phy register\n", device_xname(self)); 491 #endif 492 return; 493 } 494 CSR_WRITE_2(sc, rtk8139_reg, data); 495 return; 496 } 497 498 memset(&frame, 0, sizeof(frame)); 499 500 frame.mii_phyaddr = phy; 501 frame.mii_regaddr = reg; 502 frame.mii_data = data; 503 504 rtk_mii_writereg(sc, &frame); 505 } 506 507 static void 508 rtk_phy_statchg(struct ifnet *ifp) 509 { 510 511 /* Nothing to do. 
*/ 512 } 513 514 #define rtk_calchash(addr) \ 515 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26) 516 517 /* 518 * Program the 64-bit multicast hash filter. 519 */ 520 void 521 rtk_setmulti(struct rtk_softc *sc) 522 { 523 struct ifnet *ifp; 524 uint32_t hashes[2] = { 0, 0 }; 525 uint32_t rxfilt; 526 struct ether_multi *enm; 527 struct ether_multistep step; 528 int h, mcnt; 529 530 ifp = &sc->ethercom.ec_if; 531 532 rxfilt = CSR_READ_4(sc, RTK_RXCFG); 533 534 if (ifp->if_flags & IFF_PROMISC) { 535 allmulti: 536 ifp->if_flags |= IFF_ALLMULTI; 537 rxfilt |= RTK_RXCFG_RX_MULTI; 538 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt); 539 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF); 540 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF); 541 return; 542 } 543 544 /* first, zot all the existing hash bits */ 545 CSR_WRITE_4(sc, RTK_MAR0, 0); 546 CSR_WRITE_4(sc, RTK_MAR4, 0); 547 548 /* now program new ones */ 549 ETHER_FIRST_MULTI(step, &sc->ethercom, enm); 550 mcnt = 0; 551 while (enm != NULL) { 552 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 553 ETHER_ADDR_LEN) != 0) 554 goto allmulti; 555 556 h = rtk_calchash(enm->enm_addrlo); 557 if (h < 32) 558 hashes[0] |= (1 << h); 559 else 560 hashes[1] |= (1 << (h - 32)); 561 mcnt++; 562 ETHER_NEXT_MULTI(step, enm); 563 } 564 565 ifp->if_flags &= ~IFF_ALLMULTI; 566 567 if (mcnt) 568 rxfilt |= RTK_RXCFG_RX_MULTI; 569 else 570 rxfilt &= ~RTK_RXCFG_RX_MULTI; 571 572 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt); 573 574 /* 575 * For some unfathomable reason, RealTek decided to reverse 576 * the order of the multicast hash registers in the PCI Express 577 * parts. This means we have to write the hash pattern in reverse 578 * order for those devices. 579 */ 580 if ((sc->sc_quirk & RTKQ_PCIE) != 0) { 581 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1])); 582 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0])); 583 } else { 584 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]); 585 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]); 586 } 587 } 588 589 void 590 rtk_reset(struct rtk_softc *sc) 591 { 592 int i; 593 594 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET); 595 596 for (i = 0; i < RTK_TIMEOUT; i++) { 597 DELAY(10); 598 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0) 599 break; 600 } 601 if (i == RTK_TIMEOUT) 602 printf("%s: reset never completed!\n", 603 device_xname(sc->sc_dev)); 604 } 605 606 /* 607 * Attach the interface. Allocate softc structures, do ifmedia 608 * setup and ethernet/BPF attach. 609 */ 610 void 611 rtk_attach(struct rtk_softc *sc) 612 { 613 device_t self = sc->sc_dev; 614 struct ifnet *ifp; 615 struct rtk_tx_desc *txd; 616 uint16_t val; 617 uint8_t eaddr[ETHER_ADDR_LEN]; 618 int error; 619 int i, addr_len; 620 621 callout_init(&sc->rtk_tick_ch, 0); 622 623 /* 624 * Check EEPROM type 9346 or 9356. 625 */ 626 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129) 627 addr_len = RTK_EEADDR_LEN1; 628 else 629 addr_len = RTK_EEADDR_LEN0; 630 631 /* 632 * Get station address. 
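	 * Each 16-bit EEPROM word holds two bytes of the MAC address, low
	 * byte first. As a purely illustrative example, if the three words
	 * read back as 0x1300, 0x20f2 and 0x6b05, the station address is
	 * 00:13:f2:20:05:6b.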
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = rtk_phy_readreg;
	sc->mii.mii_writereg = rtk_phy_writereg;
	sc->mii.mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routines.
738 */ 739 if_attach(ifp); 740 if_deferred_start_init(ifp, NULL); 741 ether_ifattach(ifp, eaddr); 742 743 rnd_attach_source(&sc->rnd_source, device_xname(self), 744 RND_TYPE_NET, RND_FLAG_DEFAULT); 745 746 return; 747 fail_4: 748 for (i = 0; i < RTK_TX_LIST_CNT; i++) { 749 txd = &sc->rtk_tx_descs[i]; 750 if (txd->txd_dmamap != NULL) 751 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap); 752 } 753 fail_3: 754 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap); 755 fail_2: 756 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf, 757 RTK_RXBUFLEN + 16); 758 fail_1: 759 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg); 760 fail_0: 761 return; 762 } 763 764 /* 765 * Initialize the transmit descriptors. 766 */ 767 static void 768 rtk_list_tx_init(struct rtk_softc *sc) 769 { 770 struct rtk_tx_desc *txd; 771 int i; 772 773 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) 774 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q); 775 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) 776 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q); 777 778 for (i = 0; i < RTK_TX_LIST_CNT; i++) { 779 txd = &sc->rtk_tx_descs[i]; 780 CSR_WRITE_4(sc, txd->txd_txaddr, 0); 781 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q); 782 } 783 } 784 785 /* 786 * rtk_activate: 787 * Handle device activation/deactivation requests. 788 */ 789 int 790 rtk_activate(device_t self, enum devact act) 791 { 792 struct rtk_softc *sc = device_private(self); 793 794 switch (act) { 795 case DVACT_DEACTIVATE: 796 if_deactivate(&sc->ethercom.ec_if); 797 return 0; 798 default: 799 return EOPNOTSUPP; 800 } 801 } 802 803 /* 804 * rtk_detach: 805 * Detach a rtk interface. 806 */ 807 int 808 rtk_detach(struct rtk_softc *sc) 809 { 810 struct ifnet *ifp = &sc->ethercom.ec_if; 811 struct rtk_tx_desc *txd; 812 int i; 813 814 /* 815 * Succeed now if there isn't any work to do. 816 */ 817 if ((sc->sc_flags & RTK_ATTACHED) == 0) 818 return 0; 819 820 /* Unhook our tick handler. */ 821 callout_stop(&sc->rtk_tick_ch); 822 823 /* Detach all PHYs. */ 824 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY); 825 826 /* Delete all remaining media. */ 827 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY); 828 829 rnd_detach_source(&sc->rnd_source); 830 831 ether_ifdetach(ifp); 832 if_detach(ifp); 833 834 for (i = 0; i < RTK_TX_LIST_CNT; i++) { 835 txd = &sc->rtk_tx_descs[i]; 836 if (txd->txd_dmamap != NULL) 837 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap); 838 } 839 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap); 840 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf, 841 RTK_RXBUFLEN + 16); 842 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg); 843 844 /* we don't want to run again */ 845 sc->sc_flags &= ~RTK_ATTACHED; 846 847 return 0; 848 } 849 850 /* 851 * rtk_enable: 852 * Enable the RTL81X9 chip. 853 */ 854 int 855 rtk_enable(struct rtk_softc *sc) 856 { 857 858 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) { 859 if ((*sc->sc_enable)(sc) != 0) { 860 printf("%s: device enable failed\n", 861 device_xname(sc->sc_dev)); 862 return EIO; 863 } 864 sc->sc_flags |= RTK_ENABLED; 865 } 866 return 0; 867 } 868 869 /* 870 * rtk_disable: 871 * Disable the RTL81X9 chip. 872 */ 873 void 874 rtk_disable(struct rtk_softc *sc) 875 { 876 877 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) { 878 (*sc->sc_disable)(sc); 879 sc->sc_flags &= ~RTK_ENABLED; 880 } 881 } 882 883 /* 884 * A frame has been uploaded: pass the resulting mbuf chain up to 885 * the higher level protocols. 
886 * 887 * You know there's something wrong with a PCI bus-master chip design. 888 * 889 * The receive operation is badly documented in the datasheet, so I'll 890 * attempt to document it here. The driver provides a buffer area and 891 * places its base address in the RX buffer start address register. 892 * The chip then begins copying frames into the RX buffer. Each frame 893 * is preceded by a 32-bit RX status word which specifies the length 894 * of the frame and certain other status bits. Each frame (starting with 895 * the status word) is also 32-bit aligned. The frame length is in the 896 * first 16 bits of the status word; the lower 15 bits correspond with 897 * the 'rx status register' mentioned in the datasheet. 898 * 899 * Note: to make the Alpha happy, the frame payload needs to be aligned 900 * on a 32-bit boundary. To achieve this, we copy the data to mbuf 901 * shifted forward 2 bytes. 902 */ 903 static void 904 rtk_rxeof(struct rtk_softc *sc) 905 { 906 struct mbuf *m; 907 struct ifnet *ifp; 908 uint8_t *rxbufpos, *dst; 909 u_int total_len, wrap; 910 uint32_t rxstat; 911 uint16_t cur_rx, new_rx; 912 uint16_t limit; 913 uint16_t rx_bytes, max_bytes; 914 915 ifp = &sc->ethercom.ec_if; 916 917 cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN; 918 919 /* Do not try to read past this point. */ 920 limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN; 921 922 if (limit < cur_rx) 923 max_bytes = (RTK_RXBUFLEN - cur_rx) + limit; 924 else 925 max_bytes = limit - cur_rx; 926 rx_bytes = 0; 927 928 while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) { 929 rxbufpos = sc->rtk_rx_buf + cur_rx; 930 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx, 931 RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD); 932 rxstat = le32toh(*(uint32_t *)rxbufpos); 933 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx, 934 RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD); 935 936 /* 937 * Here's a totally undocumented fact for you. When the 938 * RealTek chip is in the process of copying a packet into 939 * RAM for you, the length will be 0xfff0. If you spot a 940 * packet header with this value, you need to stop. The 941 * datasheet makes absolutely no mention of this and 942 * RealTek should be shot for this. 943 */ 944 total_len = rxstat >> 16; 945 if (total_len == RTK_RXSTAT_UNFINISHED) 946 break; 947 948 if ((rxstat & RTK_RXSTAT_RXOK) == 0 || 949 total_len < ETHER_MIN_LEN || 950 total_len > (MCLBYTES - RTK_ETHER_ALIGN)) { 951 ifp->if_ierrors++; 952 953 /* 954 * submitted by:[netbsd-pcmcia:00484] 955 * Takahiro Kambe <taca@sky.yamashina.kyoto.jp> 956 * obtain from: 957 * FreeBSD if_rl.c rev 1.24->1.25 958 * 959 */ 960 #if 0 961 if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT| 962 RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR| 963 RTK_RXSTAT_ALIGNERR)) { 964 CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB); 965 CSR_WRITE_2(sc, RTK_COMMAND, 966 RTK_CMD_TX_ENB|RTK_CMD_RX_ENB); 967 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG); 968 CSR_WRITE_4(sc, RTK_RXADDR, 969 sc->recv_dmamap->dm_segs[0].ds_addr); 970 cur_rx = 0; 971 } 972 break; 973 #else 974 rtk_init(ifp); 975 return; 976 #endif 977 } 978 979 /* No errors; receive the packet. */ 980 rx_bytes += total_len + RTK_RXSTAT_LEN; 981 982 /* 983 * Avoid trying to read more bytes than we know 984 * the chip has prepared for us. 985 */ 986 if (rx_bytes > max_bytes) 987 break; 988 989 /* 990 * Skip the status word, wrapping around to the beginning 991 * of the Rx area, if necessary. 
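		 * As a worked example (assuming, purely for illustration, a
		 * 16 KB ring; RTK_RXBUFLEN is defined elsewhere): if cur_rx
		 * lands at 16380 once the 4-byte status word is skipped and
		 * the frame occupies 100 bytes of the ring, then
		 * wrap = 16384 - 16380 = 4, so the frame's first 4 bytes sit
		 * at the very end of the ring and the rest at the start, and
		 * new_rx becomes 100 - 4 = 96 (already a multiple of four,
		 * so roundup2() leaves it unchanged).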
992 */ 993 cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN; 994 rxbufpos = sc->rtk_rx_buf + cur_rx; 995 996 /* 997 * Compute the number of bytes at which the packet 998 * will wrap to the beginning of the ring buffer. 999 */ 1000 wrap = RTK_RXBUFLEN - cur_rx; 1001 1002 /* 1003 * Compute where the next pending packet is. 1004 */ 1005 if (total_len > wrap) 1006 new_rx = total_len - wrap; 1007 else 1008 new_rx = cur_rx + total_len; 1009 /* Round up to 32-bit boundary. */ 1010 new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN; 1011 1012 /* 1013 * The RealTek chip includes the CRC with every 1014 * incoming packet; trim it off here. 1015 */ 1016 total_len -= ETHER_CRC_LEN; 1017 1018 /* 1019 * Now allocate an mbuf (and possibly a cluster) to hold 1020 * the packet. Note we offset the packet 2 bytes so that 1021 * data after the Ethernet header will be 4-byte aligned. 1022 */ 1023 MGETHDR(m, M_DONTWAIT, MT_DATA); 1024 if (m == NULL) { 1025 printf("%s: unable to allocate Rx mbuf\n", 1026 device_xname(sc->sc_dev)); 1027 ifp->if_ierrors++; 1028 goto next_packet; 1029 } 1030 if (total_len > (MHLEN - RTK_ETHER_ALIGN)) { 1031 MCLGET(m, M_DONTWAIT); 1032 if ((m->m_flags & M_EXT) == 0) { 1033 printf("%s: unable to allocate Rx cluster\n", 1034 device_xname(sc->sc_dev)); 1035 ifp->if_ierrors++; 1036 m_freem(m); 1037 m = NULL; 1038 goto next_packet; 1039 } 1040 } 1041 m->m_data += RTK_ETHER_ALIGN; /* for alignment */ 1042 m_set_rcvif(m, ifp); 1043 m->m_pkthdr.len = m->m_len = total_len; 1044 dst = mtod(m, void *); 1045 1046 /* 1047 * If the packet wraps, copy up to the wrapping point. 1048 */ 1049 if (total_len > wrap) { 1050 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 1051 cur_rx, wrap, BUS_DMASYNC_POSTREAD); 1052 memcpy(dst, rxbufpos, wrap); 1053 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 1054 cur_rx, wrap, BUS_DMASYNC_PREREAD); 1055 cur_rx = 0; 1056 rxbufpos = sc->rtk_rx_buf; 1057 total_len -= wrap; 1058 dst += wrap; 1059 } 1060 1061 /* 1062 * ...and now the rest. 1063 */ 1064 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 1065 cur_rx, total_len, BUS_DMASYNC_POSTREAD); 1066 memcpy(dst, rxbufpos, total_len); 1067 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 1068 cur_rx, total_len, BUS_DMASYNC_PREREAD); 1069 1070 next_packet: 1071 CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN); 1072 cur_rx = new_rx; 1073 1074 if (m == NULL) 1075 continue; 1076 1077 /* pass it on. */ 1078 if_percpuq_enqueue(ifp->if_percpuq, m); 1079 } 1080 } 1081 1082 /* 1083 * A frame was downloaded to the chip. It's safe for us to clean up 1084 * the list buffers. 1085 */ 1086 static void 1087 rtk_txeof(struct rtk_softc *sc) 1088 { 1089 struct ifnet *ifp; 1090 struct rtk_tx_desc *txd; 1091 uint32_t txstat; 1092 1093 ifp = &sc->ethercom.ec_if; 1094 1095 /* 1096 * Go through our tx list and free mbufs for those 1097 * frames that have been uploaded. 
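	 * (Two code details worth noting: the collision count is the field
	 * the ">> 24" below pulls out of the per-descriptor status word, and
	 * sc_txthresh is kept in 32-byte units, so the "+= 2" applied on a
	 * FIFO underrun raises the early-TX threshold by 64 bytes each time,
	 * as the DEBUG printf's "* 32" shows.)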
	 */
	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
		txstat = CSR_READ_4(sc, txd->txd_txstat);
		if ((txstat & (RTK_TXSTAT_TX_OK|
		    RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);

		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
		m_freem(txd->txd_mbuf);
		txd->txd_mbuf = NULL;

		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;

		if (txstat & RTK_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;

			/*
			 * Increase the early TX threshold if an underrun
			 * occurred, in steps of 64 bytes.
			 */
			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
#ifdef DEBUG
				printf("%s: transmit underrun;",
				    device_xname(sc->sc_dev));
#endif
				if (sc->sc_txthresh < RTK_TXTH_MAX) {
					sc->sc_txthresh += 2;
#ifdef DEBUG
					printf(" new threshold: %d bytes",
					    sc->sc_txthresh * 32);
#endif
				}
#ifdef DEBUG
				printf("\n");
#endif
			}
			if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
		}
		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Clear the timeout timer if there is no pending packet. */
	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
		ifp->if_timer = 0;

}

int
rtk_intr(void *arg)
{
	struct rtk_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled;

	sc = arg;
	ifp = &sc->ethercom.ec_if;

	if (!device_has_power(sc->sc_dev))
		return 0;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	handled = 0;
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);

		if (status == 0xffff)
			break; /* Card is gone... */

		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		if ((status & RTK_INTRS) == 0)
			break;

		handled = 1;

		if (status & RTK_ISR_RX_OK)
			rtk_rxeof(sc);

		if (status & RTK_ISR_RX_ERR)
			rtk_rxeof(sc);

		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
			rtk_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			rtk_reset(sc);
			rtk_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	if_schedule_deferred_start(ifp);

	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}

/*
 * Main transmit routine.
 */

static void
rtk_start(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	struct rtk_tx_desc *txd;
	struct mbuf *m_head, *m_new;
	int error, len;

	sc = ifp->if_softc;

	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		m_new = NULL;

		/*
		 * Load the DMA map. If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy. Note,
		 * the packet must also be aligned. If the packet is
		 * too small, copy it too, so we're sure to have enough
		 * room for the pad buffer.
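		 * For example, a 42-byte ARP request is shorter than
		 * ETHER_PAD_LEN (60 bytes, i.e. ETHER_MIN_LEN minus the
		 * CRC), so it is copied into a fresh mbuf, zero-padded to
		 * 60 bytes, and only then handed to the chip, which appends
		 * the 4-byte CRC to reach the 64-byte minimum frame size.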
1237 */ 1238 if ((mtod(m_head, uintptr_t) & 3) != 0 || 1239 m_head->m_pkthdr.len < ETHER_PAD_LEN || 1240 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap, 1241 m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { 1242 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1243 if (m_new == NULL) { 1244 printf("%s: unable to allocate Tx mbuf\n", 1245 device_xname(sc->sc_dev)); 1246 break; 1247 } 1248 if (m_head->m_pkthdr.len > MHLEN) { 1249 MCLGET(m_new, M_DONTWAIT); 1250 if ((m_new->m_flags & M_EXT) == 0) { 1251 printf("%s: unable to allocate Tx " 1252 "cluster\n", 1253 device_xname(sc->sc_dev)); 1254 m_freem(m_new); 1255 break; 1256 } 1257 } 1258 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1259 mtod(m_new, void *)); 1260 m_new->m_pkthdr.len = m_new->m_len = 1261 m_head->m_pkthdr.len; 1262 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) { 1263 memset( 1264 mtod(m_new, char *) + m_head->m_pkthdr.len, 1265 0, ETHER_PAD_LEN - m_head->m_pkthdr.len); 1266 m_new->m_pkthdr.len = m_new->m_len = 1267 ETHER_PAD_LEN; 1268 } 1269 error = bus_dmamap_load_mbuf(sc->sc_dmat, 1270 txd->txd_dmamap, m_new, 1271 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1272 if (error) { 1273 printf("%s: unable to load Tx buffer, " 1274 "error = %d\n", 1275 device_xname(sc->sc_dev), error); 1276 break; 1277 } 1278 } 1279 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1280 /* 1281 * If there's a BPF listener, bounce a copy of this frame 1282 * to him. 1283 */ 1284 bpf_mtap(ifp, m_head); 1285 if (m_new != NULL) { 1286 m_freem(m_head); 1287 m_head = m_new; 1288 } 1289 txd->txd_mbuf = m_head; 1290 1291 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q); 1292 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q); 1293 1294 /* 1295 * Transmit the frame. 1296 */ 1297 bus_dmamap_sync(sc->sc_dmat, 1298 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize, 1299 BUS_DMASYNC_PREWRITE); 1300 1301 len = txd->txd_dmamap->dm_segs[0].ds_len; 1302 1303 CSR_WRITE_4(sc, txd->txd_txaddr, 1304 txd->txd_dmamap->dm_segs[0].ds_addr); 1305 CSR_WRITE_4(sc, txd->txd_txstat, 1306 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len); 1307 1308 /* 1309 * Set a timeout in case the chip goes out to lunch. 1310 */ 1311 ifp->if_timer = 5; 1312 } 1313 1314 /* 1315 * We broke out of the loop because all our TX slots are 1316 * full. Mark the NIC as busy until it drains some of the 1317 * packets from the queue. 1318 */ 1319 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free)) 1320 ifp->if_flags |= IFF_OACTIVE; 1321 } 1322 1323 static int 1324 rtk_init(struct ifnet *ifp) 1325 { 1326 struct rtk_softc *sc = ifp->if_softc; 1327 int error, i; 1328 uint32_t rxcfg; 1329 1330 if ((error = rtk_enable(sc)) != 0) 1331 goto out; 1332 1333 /* 1334 * Cancel pending I/O. 1335 */ 1336 rtk_stop(ifp, 0); 1337 1338 /* Init our MAC address */ 1339 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1340 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]); 1341 } 1342 1343 /* Init the RX buffer pointer register. */ 1344 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0, 1345 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1346 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr); 1347 1348 /* Init TX descriptors. */ 1349 rtk_list_tx_init(sc); 1350 1351 /* Init Early TX threshold. */ 1352 sc->sc_txthresh = RTK_TXTH_256; 1353 /* 1354 * Enable transmit and receive. 1355 */ 1356 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB); 1357 1358 /* 1359 * Set the initial TX and RX configuration. 
1360 */ 1361 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG); 1362 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG); 1363 1364 /* Set the individual bit to receive frames for this host only. */ 1365 rxcfg = CSR_READ_4(sc, RTK_RXCFG); 1366 rxcfg |= RTK_RXCFG_RX_INDIV; 1367 1368 /* If we want promiscuous mode, set the allframes bit. */ 1369 if (ifp->if_flags & IFF_PROMISC) { 1370 rxcfg |= RTK_RXCFG_RX_ALLPHYS; 1371 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1372 } else { 1373 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS; 1374 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1375 } 1376 1377 /* 1378 * Set capture broadcast bit to capture broadcast frames. 1379 */ 1380 if (ifp->if_flags & IFF_BROADCAST) { 1381 rxcfg |= RTK_RXCFG_RX_BROAD; 1382 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1383 } else { 1384 rxcfg &= ~RTK_RXCFG_RX_BROAD; 1385 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg); 1386 } 1387 1388 /* 1389 * Program the multicast filter, if necessary. 1390 */ 1391 rtk_setmulti(sc); 1392 1393 /* 1394 * Enable interrupts. 1395 */ 1396 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS); 1397 1398 /* Start RX/TX process. */ 1399 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0); 1400 1401 /* Enable receiver and transmitter. */ 1402 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB); 1403 1404 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX); 1405 1406 /* 1407 * Set current media. 1408 */ 1409 if ((error = ether_mediachange(ifp)) != 0) 1410 goto out; 1411 1412 ifp->if_flags |= IFF_RUNNING; 1413 ifp->if_flags &= ~IFF_OACTIVE; 1414 1415 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc); 1416 1417 out: 1418 if (error) { 1419 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1420 ifp->if_timer = 0; 1421 printf("%s: interface not running\n", device_xname(sc->sc_dev)); 1422 } 1423 return error; 1424 } 1425 1426 static int 1427 rtk_ioctl(struct ifnet *ifp, u_long command, void *data) 1428 { 1429 struct rtk_softc *sc = ifp->if_softc; 1430 int s, error; 1431 1432 s = splnet(); 1433 error = ether_ioctl(ifp, command, data); 1434 if (error == ENETRESET) { 1435 if (ifp->if_flags & IFF_RUNNING) { 1436 /* 1437 * Multicast list has changed. Set the 1438 * hardware filter accordingly. 1439 */ 1440 rtk_setmulti(sc); 1441 } 1442 error = 0; 1443 } 1444 splx(s); 1445 1446 return error; 1447 } 1448 1449 static void 1450 rtk_watchdog(struct ifnet *ifp) 1451 { 1452 struct rtk_softc *sc; 1453 1454 sc = ifp->if_softc; 1455 1456 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1457 ifp->if_oerrors++; 1458 rtk_txeof(sc); 1459 rtk_rxeof(sc); 1460 rtk_init(ifp); 1461 } 1462 1463 /* 1464 * Stop the adapter and free any mbufs allocated to the 1465 * RX and TX lists. 1466 */ 1467 static void 1468 rtk_stop(struct ifnet *ifp, int disable) 1469 { 1470 struct rtk_softc *sc = ifp->if_softc; 1471 struct rtk_tx_desc *txd; 1472 1473 callout_stop(&sc->rtk_tick_ch); 1474 1475 mii_down(&sc->mii); 1476 1477 CSR_WRITE_1(sc, RTK_COMMAND, 0x00); 1478 CSR_WRITE_2(sc, RTK_IMR, 0x0000); 1479 1480 /* 1481 * Free the TX list buffers. 
1482 */ 1483 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) { 1484 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q); 1485 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap); 1486 m_freem(txd->txd_mbuf); 1487 txd->txd_mbuf = NULL; 1488 CSR_WRITE_4(sc, txd->txd_txaddr, 0); 1489 } 1490 1491 if (disable) 1492 rtk_disable(sc); 1493 1494 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1495 ifp->if_timer = 0; 1496 } 1497 1498 static void 1499 rtk_tick(void *arg) 1500 { 1501 struct rtk_softc *sc = arg; 1502 int s; 1503 1504 s = splnet(); 1505 mii_tick(&sc->mii); 1506 splx(s); 1507 1508 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc); 1509 } 1510
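/*
 * The following block is not part of the driver; it is an illustrative
 * sketch (hence the #if 0) of how rtk_setmulti() maps a multicast group
 * address onto the 64-bit hash filter programmed into RTK_MAR0/RTK_MAR4.
 * The function name and the example address are invented for the example.
 */
#if 0
static void
rtk_hash_example(void)
{
	/* 01:00:5e:00:00:01 is the all-hosts IPv4 multicast group. */
	static const uint8_t addr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t h, mar0 = 0, mar4 = 0;

	/* The top 6 bits of the big-endian CRC pick one of 64 filter bits. */
	h = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		mar0 |= 1 << h;		/* bits  0..31 live in RTK_MAR0 */
	else
		mar4 |= 1 << (h - 32);	/* bits 32..63 live in RTK_MAR4 */

	/*
	 * rtk_setmulti() would then write mar0/mar4 to the chip, with the
	 * two registers swapped and byte-reversed on PCIe parts (RTKQ_PCIE).
	 */
}
#endif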