1 /* $OpenBSD: if_sis.c,v 1.116 2014/07/08 05:35:18 dlg Exp $ */ 2 /* 3 * Copyright (c) 1997, 1998, 1999 4 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $ 34 */ 35 36 /* 37 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are 38 * available from http://www.sis.com.tw. 39 * 40 * This driver also supports the NatSemi DP83815. Datasheets are 41 * available from http://www.national.com. 42 * 43 * Written by Bill Paul <wpaul@ee.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48 /* 49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with 50 * simple TX and RX descriptors of 3 longwords in size. The receiver 51 * has a single perfect filter entry for the station address and a 52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based 53 * transceiver while the 7016 requires an external transceiver chip. 54 * Both chips offer the standard bit-bang MII interface as well as 55 * an enchanced PHY interface which simplifies accessing MII registers. 56 * 57 * The only downside to this chipset is that RX descriptors must be 58 * longword aligned. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/device.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* Map the chip through I/O space rather than memory space. */
#define SIS_USEIOSPACE

#include <dev/pci/if_sisreg.h>

/* autoconf(9) entry points */
int sis_probe(struct device *, void *, void *);
void sis_attach(struct device *, struct device *, void *);
int sis_activate(struct device *, int);

struct cfattach sis_ca = {
	sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
	sis_activate
};

struct cfdriver sis_cd = {
	NULL, "sis", DV_IFNET
};

/* Interrupt, ring management and ifnet entry points. */
int sis_intr(void *);
void sis_fill_rx_ring(struct sis_softc *);
int sis_newbuf(struct sis_softc *, struct sis_desc *);
int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
void sis_rxeof(struct sis_softc *);
void sis_txeof(struct sis_softc *);
void sis_tick(void *);
void sis_start(struct ifnet *);
int sis_ioctl(struct ifnet *, u_long, caddr_t);
void sis_init(void *);
void sis_stop(struct sis_softc *);
void sis_watchdog(struct ifnet *);
int sis_ifmedia_upd(struct ifnet *);
void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* EEPROM / station-address helpers. */
u_int16_t sis_reverse(u_int16_t);
void sis_delay(struct sis_softc *);
void sis_eeprom_idle(struct sis_softc *);
void sis_eeprom_putbyte(struct sis_softc *, int);
void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
#if defined(__amd64__) || defined(__i386__)
void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
#endif
void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
void sis_read96x_mac(struct sis_softc *);

/* Bit-bang MII access and miibus(4) glue. */
void sis_mii_sync(struct sis_softc *);
void sis_mii_send(struct sis_softc *, u_int32_t, int);
int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
int sis_miibus_readreg(struct device *, int, int);
void sis_miibus_writereg(struct device *, int, int, int);
void sis_miibus_statchg(struct device *);

/* Receive filter programming and reset. */
u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
void sis_iff(struct sis_softc *);
void sis_iff_ns(struct sis_softc *);
void sis_iff_sis(struct sis_softc *);
void sis_reset(struct sis_softc *);
int sis_ring_init(struct sis_softc *);

/* Read-modify-write helpers for 32-bit CSRs. */
#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/*
 * Shorthand for toggling bits in the serial EEPROM/MII control
 * register; these rely on a local `sc' being in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)

/* PCI IDs this driver claims. */
const struct pci_matchid sis_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
};

/*
 * Routine to reverse the bits in a word. Stolen almost
 * verbatim from /usr/games/fortune.
 */
u_int16_t
sis_reverse(u_int16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return (n);
}

/*
 * Short busy-wait implemented as dummy CSR reads; each PCI read
 * stalls the CPU for the duration of the bus cycle.  The 300/33
 * constant is inherited from the original driver — presumably
 * ~300ns at a 33MHz PCI clock (TODO: confirm against datasheet).
 */
void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

/*
 * Clock the serial EEPROM into an idle state: select it, strobe
 * the clock 25 times, then deselect and clear the control register.
 */
void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 * 0x400 = 11 bits total: 3 command bits plus 8 address bits.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i)
			SIO_SET(SIS_EECTL_DIN);
		else
			SIO_CLR(SIS_EECTL_DIN);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 * 'off' and 'cnt' are in 16-bit words; 'swap' selects
 * little-endian-to-host conversion of each word.
 */
void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
    int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		sis_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = letoh16(word);
		else
			*ptr = word;
	}
}

#if defined(__amd64__) || defined(__i386__)
/*
 * Read 'cnt' bytes of APC CMOS RAM starting at offset 'off' into
 * 'dest'.  Used on SiS 630E-family boards where the embedded 900's
 * MAC address lives in CMOS.  Bit 0x40 of PCI config register 0x48
 * gates APC access; 0x70/0x71 are the standard RTC index/data I/O
 * ports (accessed here with a zero bus handle — raw port I/O).
 */
void
sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
    caddr_t dest, int off, int cnt)
{
	u_int32_t reg;
	int i;

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
}
#endif

/*
 * Recover the station address from the chip's receive filter
 * registers: pulse RELOAD so the hardware refetches its defaults,
 * then read the three PAR words with the filter disabled.
 */
void
sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
{
	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RELOAD);

	/* Filter must be off while the PAR registers are indexed. */
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);

	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
}

/*
 * SiS 96x variant: the EEPROM is shared, so request the bus, poll
 * up to 2000us for a grant, read the node address, then signal done.
 * On grant timeout the address is simply left unread.
 */
void
sis_read96x_mac(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS96x_EECTL_REQ);

	for (i = 0; i < 2000; i++) {
		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 1);
			break;
		} else
			DELAY(1);
	}

	SIO_SET(SIS96x_EECTL_DONE);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
sis_mii_sync(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII, MSB first.
 */
void
sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			SIO_SET(SIS_MII_DATA);
		else
			SIO_CLR(SIS_MII_DATA);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 * Returns non-zero if the PHY failed to ack the read.
 */
int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack (data line pulled low by the PHY). */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 * Always succeeds (returns 0); there is no ack check on writes.
 */
int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	splx(s);

	return (0);
}

/*
 * miibus(4) register-read entry point.  Three access methods exist
 * depending on the chip: memory-mapped MII registers (DP83815),
 * the enhanced PHY access register (older SiS 900), or the
 * bit-banged frame protocol (everything else).
 */
int
sis_miibus_readreg(struct device *self, int phy, int reg)
{
	struct sis_softc *sc = (struct sis_softc *)self;
	struct sis_mii_frame frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0. This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase. So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		/* Poll for completion of the access cycle. */
		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
			return (0);
		}

		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		/* All-ones means no PHY answered; report as zero. */
		if (val == 0xFFFF)
			return (0);

		return (val);
	} else {
		bzero(&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return (frame.mii_data);
	}
}

/*
 * miibus(4) register-write entry point; mirrors the three access
 * methods used by sis_miibus_readreg() above.
 */
void
sis_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct sis_softc *sc = (struct sis_softc *)self;
	struct sis_mii_frame frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return;
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return;
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return;

		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
	} else {
		bzero(&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
}

/*
 * miibus(4) link status change callback: program the MAC's TX/RX
 * configuration to match the PHY's negotiated speed and duplex,
 * apply NatSemi errata, and (re)enable the MACs.
 */
void
sis_miibus_statchg(struct device *self)
{
	struct sis_softc *sc = (struct sis_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->sis_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
			sc->sis_link++;
			break;
		case IFM_100_TX:
			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
			sc->sis_link++;
			break;
		default:
			break;
		}
	}

	if (!sc->sis_link) {
		/*
		 * Stopping MACs seem to reset SIS_TX_LISTPTR and
		 * SIS_RX_LISTPTR which in turn requires resetting
		 * TX/RX buffers. So just don't do anything for
		 * lost link.
		 */
		return;
	}

	/* Set full/half duplex mode. */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
	}

	/*
	 * Some DP83815s experience problems when used with short
	 * (< 30m/100ft) Ethernet cables in 100baseTX mode. This
	 * sequence adjusts the DSP's signal attenuation to fix the
	 * problem.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
#ifdef DEBUG
			printf("%s: Applying short cable fix (reg=%x)\n",
			    sc->sc_dev.dv_xname, reg);
#endif
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}
	/* Enable TX/RX MACs. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
}

/*
 * Compute the multicast hash filter bit position for 'addr'.
 */
u_int32_t
sis_mchash(struct sis_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	/*
	 * return the filter bit position
	 *
	 * The NatSemi chip has a 512-bit filter, which is
	 * different than the SiS, so we special-case it.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		return (crc >> 23);
	else if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		return (crc >> 24);	/* 256-bit hash table */
	else
		return (crc >> 25);	/* 128-bit hash table */
}

/*
 * Program the receive filter; dispatches on chip family.
 */
void
sis_iff(struct sis_softc *sc)
{
	if (sc->sis_type == SIS_TYPE_83815)
		sis_iff_ns(sc);
	else
		sis_iff_sis(sc);
}

/*
 * Receive filter setup for the NatSemi DP83815 (512-bit hash).
 */
void
sis_iff_ns(struct sis_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h = 0, i, rxfilt;
	int bit, index;

	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
	    NS_RXFILTCTL_PERFECT);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept ARP frames.
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
	    NS_RXFILTCTL_PERFECT;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
	} else {
		/*
		 * We have to explicitly enable the multicast hash table
		 * on the NatSemi chip if we want to use it, which we do.
		 */
		rxfilt |= NS_RXFILTCTL_MCHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < 32; i++) {
			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
		}

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = sis_mchash(sc, enm->enm_addrlo);

			/* Hash table is addressed in 16-bit words. */
			index = h >> 3;
			bit = h & 0x1F;

			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);

			if (bit > 0xF)
				bit -= 0x10;

			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
}

/*
 * Receive filter setup for SiS 900/7016 (128- or 256-bit hash).
 */
void
sis_iff_sis(struct sis_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h, i, maxmulti, rxfilt;
	u_int16_t hashes[16];

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		maxmulti = 16;
	else
		maxmulti = 8;

	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
	    SIS_RXFILTCTL_BROAD);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxfilt |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > maxmulti) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= SIS_RXFILTCTL_ALLPHYS;

		for (i = 0; i < maxmulti; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < maxmulti; i++)
			hashes[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = sis_mchash(sc, enm->enm_addrlo);

			hashes[h >> 4] |= 1 << (h & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Hash registers start at filter address 4. */
	for (i = 0; i < maxmulti; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
}

/*
 * Soft-reset the chip and wait for the self-clearing RESET bit.
 */
void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NetSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
sis_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
	    nitems(sis_devices)));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
sis_attach(struct device *parent, struct device *self, void *aux)
{
	int i;
	const char *intrstr = NULL;
	struct sis_softc *sc = (struct sis_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	struct ifnet *ifp;
	bus_size_t size;

	sc->sis_stopped = 1;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */

#ifdef SIS_USEIOSPACE
	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Identify the chip family; sis_type selects register layouts. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SIS_900:
		sc->sis_type = SIS_TYPE_900;
		break;
	case PCI_PRODUCT_SIS_7016:
		sc->sis_type = SIS_TYPE_7016;
		break;
	case PCI_PRODUCT_NS_DP83815:
		sc->sis_type = SIS_TYPE_83815;
		break;
	default:
		break;
	}
	sc->sis_rev = PCI_REVISION(pa->pa_class);

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_NS:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		if (sc->sis_srr == NS_SRR_15C)
			printf(", DP83815C");
		else if (sc->sis_srr == NS_SRR_15D)
			printf(", DP83815D");
		else if (sc->sis_srr == NS_SRR_16A)
			printf(", DP83816A");
		else
			printf(", srr %x", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t		tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
			    4, 0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = letoh16(sis_reverse(tmp[3]));
			tmp[2] = letoh16(sis_reverse(tmp[2]));
			tmp[1] = letoh16(sis_reverse(tmp[1]));

			bcopy(&tmp[1], sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN);
		}
		break;
	case PCI_VENDOR_SIS:
	default:
#if defined(__amd64__) || defined(__i386__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E)
			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
			    0x9, 6);
		else
#endif
		if (sc->sis_rev == SIS_REV_96x)
			sis_read96x_mac(sc);
		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_mac(sc, pa);
		else
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 1);
		break;
	}

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate, map and load one contiguous chunk holding both
	 * descriptor rings (struct sis_list_data), then create per-
	 * descriptor mbuf DMA maps.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		goto fail_2;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct sis_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		goto fail_2;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		goto fail_2;
	}
	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;

	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
			printf(": can't create rx map\n");
			goto fail_2;
		}
	}

	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SIS_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
			printf(": can't create tx map\n");
			goto fail_2;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		goto fail_2;
	}

	timeout_set(&sc->sis_timeout, sis_tick, sc);

	/* Fill in the generic ifnet glue and attach. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_watchdog = sis_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_hardmtu = 1518; /* determined experimentally on DP83815 */

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sis_miibus_readreg;
	sc->sc_mii.mii_writereg = sis_miibus_writereg;
	sc->sc_mii.mii_statchg = sis_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: offer a single "none" media. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * NOTE(review): the DMA resources allocated above are not
	 * torn down on the fail_2 path — presumably acceptable at
	 * attach time, but worth confirming.
	 */
fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
}

/*
 * autoconf(9) activate hook: quiesce the interface on suspend and
 * bring it back up on resume.
 */
int
sis_activate(struct device *self, int act)
{
	struct sis_softc *sc = (struct sis_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			sis_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			sis_init(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
sis_ring_init(struct sis_softc *sc)
{
	struct sis_list_data *ld;
	struct sis_ring_data *cd;
	int i, nexti;

	cd = &sc->sis_cdata;
	ld = sc->sis_ldata;

	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (i == (SIS_TX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
		/* Physical link for the chip; descriptors are little-endian. */
		ld->sis_tx_list[i].sis_next =
		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct sis_list_data, sis_tx_list[nexti]));
		ld->sis_tx_list[i].sis_mbuf = NULL;
		ld->sis_tx_list[i].sis_ptr = 0;
		ld->sis_tx_list[i].sis_ctl = 0;
	}

	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;

	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		if (i == SIS_RX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
		ld->sis_rx_list[i].sis_next =
		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct sis_list_data, sis_rx_list[nexti]));
		ld->sis_rx_list[i].sis_ctl = 0;
	}

	cd->sis_rx_prod = cd->sis_rx_cons;
	if_rxr_init(&cd->sis_rx_ring, 2, SIS_RX_LIST_CNT - 1);
	sis_fill_rx_ring(sc);

	return (0);
}

/*
 * Populate as many free RX descriptors with fresh mbuf clusters as
 * the rxr accounting allows; unused slots are returned to the ring.
 */
void
sis_fill_rx_ring(struct sis_softc *sc)
{
	struct sis_list_data *ld;
	struct sis_ring_data *cd;
	u_int slots;

	cd = &sc->sis_cdata;
	ld = sc->sis_ldata;

	for (slots = if_rxr_get(&cd->sis_rx_ring, SIS_RX_LIST_CNT);
	    slots > 0; slots--) {
		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
			break;

		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
	}
	if_rxr_put(&cd->sis_rx_ring, slots);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Returns 0 on success, EINVAL/ENOBUFS on failure.
 */
int
sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
{
	struct mbuf *m_new = NULL;

	if (c == NULL)
		return (EINVAL);

	m_new = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m_new)
		return (ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
	    BUS_DMA_NOWAIT)) {
		m_free(m_new);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	c->sis_mbuf = m_new;
	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);

	/* Push the buffer pointer out before arming the descriptor... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
	    BUS_DMASYNC_PREWRITE);

	/* ...then hand ownership to the chip by setting the size field. */
	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct sis_desc *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the ring from the consumer index while the chip has
	 * handed descriptors back to us (OWN bit set by hardware). */
	while (if_rxr_inuse(&sc->sis_cdata.sis_rx_ring) > 0) {
		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_rx - sc->sc_listkva),
		    sizeof(struct sis_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (!SIS_OWNDESC(cur_rx))
			break;

		rxstat = letoh32(cur_rx->sis_rxstat);
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);
		/* from here on the buffer is consumed */
		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
		if_rxr_put(&sc->sis_cdata.sis_rx_ring, 1);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if (rxstat & SIS_RXSTAT_GIANT &&
		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
			rxstat &= ~SIS_RXSTAT_GIANT;
		if (SIS_RXSTAT_ERROR(rxstat)) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			m_freem(m);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef __STRICT_ALIGNMENT
		/*
		 * RX buffers must be longword aligned (chip requirement,
		 * see the header comment), which leaves the payload
		 * misaligned for strict-alignment architectures.  Copy
		 * the frame into a fresh chain with m_devget() (shifted
		 * by ETHER_ALIGN) and recycle the original cluster.
		 */
		{
			struct mbuf *m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp);
			m_freem(m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}
#else
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#endif
		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Replenish whatever slots we just consumed. */
	sis_fill_rx_ring(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
sis_txeof(struct sis_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t idx, ctl, txstat;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx - sc->sc_listkva),
		    sizeof(struct sis_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Still owned by the chip: nothing more to reap. */
		if (SIS_OWNDESC(cur_tx))
			break;

		ctl = letoh32(cur_tx->sis_ctl);

		/* Intermediate fragment of a chain; stats and the mbuf
		 * are accounted on the final descriptor only. */
		if (ctl & SIS_CMDSTS_MORE)
			continue;

		txstat = letoh32(cur_tx->sis_txstat);

		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;

		/* NOTE(review): if_opackets is incremented even when
		 * PKT_OK is clear (counted above as an oerror too) —
		 * looks like double accounting; verify intent. */
		ifp->if_opackets++;
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
		}
	}

	if (idx != sc->sis_cdata.sis_tx_cons) {
		/* we freed up some buffers */
		sc->sis_cdata.sis_tx_cons = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Keep the watchdog armed only while frames are outstanding. */
	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
}

/*
 * Once-a-second timer: drive the MII autonegotiation state machine
 * and, while the link is down, re-check link status.
 */
void
sis_tick(void *xsc)
{
	struct sis_softc *sc = (struct sis_softc *)xsc;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;
	mii_tick(mii);

	if (!sc->sis_link)
		sis_miibus_statchg(&sc->sc_dev);

	timeout_add_sec(&sc->sis_timeout, 1);

	splx(s);
}

/*
 * Interrupt handler.  Returns 0 when the interrupt was not ours
 * (shared line), 1 when it was serviced.
 */
int
sis_intr(void *arg)
{
	struct sis_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int32_t status;

	if (sc->sis_stopped)	/* Most likely shared interrupt */
		return (0);

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, SIS_ISR);
	if ((status & SIS_INTRS) == 0)
		return (0);

	if (status &
	    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
	     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
		sis_txeof(sc);

	if (status &
	    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
	     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
		sis_rxeof(sc);

	if (status & (SIS_ISR_RX_IDLE)) {
		/* consume what's there so that sis_rx_cons points
		 * to the first HW owned descriptor. */
		sis_rxeof(sc);
		/* reprogram the RX listptr */
		CSR_WRITE_4(sc, SIS_RX_LISTPTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct sis_list_data,
		    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
	}

	if (status & SIS_ISR_SYSERR) {
		sis_reset(sc);
		sis_init(sc);
	}

	/*
	 * XXX: Re-enable RX engine every time otherwise it occasionally
	 * stops under unknown circumstances.
	 */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		sis_start(ifp);

	return (1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
1577 */ 1578 int 1579 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 1580 { 1581 struct sis_desc *f = NULL; 1582 int frag, cur, i; 1583 bus_dmamap_t map; 1584 1585 map = sc->sc_tx_sparemap; 1586 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 1587 m_head, BUS_DMA_NOWAIT) != 0) 1588 return (ENOBUFS); 1589 1590 /* 1591 * Start packing the mbufs in this chain into 1592 * the fragment pointers. Stop when we run out 1593 * of fragments or hit the end of the mbuf chain. 1594 */ 1595 cur = frag = *txidx; 1596 1597 for (i = 0; i < map->dm_nsegs; i++) { 1598 if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + i)) < 2) 1599 return(ENOBUFS); 1600 f = &sc->sis_ldata->sis_tx_list[frag]; 1601 f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len); 1602 f->sis_ptr = htole32(map->dm_segs[i].ds_addr); 1603 if (i != 0) 1604 f->sis_ctl |= htole32(SIS_CMDSTS_OWN); 1605 cur = frag; 1606 SIS_INC(frag, SIS_TX_LIST_CNT); 1607 } 1608 1609 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1610 BUS_DMASYNC_PREWRITE); 1611 1612 sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head; 1613 sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE); 1614 sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN); 1615 sc->sis_cdata.sis_tx_cnt += i; 1616 *txidx = frag; 1617 1618 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1619 offsetof(struct sis_list_data, sis_tx_list[0]), 1620 sizeof(struct sis_desc) * SIS_TX_LIST_CNT, 1621 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1622 1623 return (0); 1624 } 1625 1626 /* 1627 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1628 * to the mbuf data regions directly in the transmit lists. We also save a 1629 * copy of the pointers since the transmit list fragment pointers are 1630 * physical addresses. 
 */

void
sis_start(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx, queued = 0;

	sc = ifp->if_softc;

	/* No link yet: leave packets queued until the PHY comes up. */
	if (!sc->sis_link)
		return;

	idx = sc->sis_cdata.sis_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	/* Keep encapsulating until the ring is full or the queue empty. */
	while (sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued) {
		/* Transmit */
		sc->sis_cdata.sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

/*
 * Stop the chip, rebuild the rings and program all hardware state
 * (MAC address, filters, DMA list pointers, interrupts) from scratch.
 * Called at splnet or with interrupts otherwise blocked via splnet().
 */
void
sis_init(void *xsc)
{
	struct sis_softc *sc = (struct sis_softc *)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);

#if NS_IHR_DELAY > 0
	/* Configure interrupt holdoff register. */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
#endif

	mii = &sc->sc_mii;

	/* Set MAC address: loaded 16 bits at a time through the
	 * receive-filter control/data register pair. */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		sis_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Page 78 of the DP83815 data sheet (september 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C. The driver from NS also sets
	 * the PHY_CR register for later versions.
	 *
	 * This resolves an issue with tons of errors in AcceptPerfectMatch
	 * (non-IFF_PROMISC) mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	sis_iff(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_rx_list[0]));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_tx_list[0]));

	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	else
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/*
	 * Assume 100Mbps link, actual MAC configuration is done
	 * after getting a valid link.
	 */
	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Clear MAC disable.
 */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);

	/* Link state is re-learned via the MII after mediachg. */
	sc->sis_link = 0;
	mii_mediachg(mii);

	sc->sis_stopped = 0;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_add_sec(&sc->sis_timeout, 1);
}

/*
 * Set media options.
 */
int
sis_ifmedia_upd(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	mii = &sc->sc_mii;
	/* With multiple PHY instances, reset each PHY before switching. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
void
sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls for the interface.  Runs at splnet; filter
 * reprogramming requests are funneled through the ENETRESET path.
 */
int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sis_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				sis_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sis_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		/* Only the RX filter needs updating; no full re-init. */
		if (ifp->if_flags & IFF_RUNNING)
			sis_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * TX watchdog: the chip failed to complete transmission within the
 * if_timer window, so reset and reinitialize it.
 */
void
sis_watchdog(struct ifnet *ifp)
{
	struct sis_softc *sc;
	int s;

	sc = ifp->if_softc;

	if (sc->sis_stopped)
		return;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	s = splnet();
	sis_stop(sc);
	sis_reset(sc);
	sis_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		sis_start(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
sis_stop(struct sis_softc *sc)
{
	int i;
	struct ifnet *ifp;

	if (sc->sis_stopped)
		return;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->sis_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sis_stopped = 1;

	/* Mask and ack interrupts, then halt both DMA engines. */
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
		}
		/* Zero the descriptor but keep the trailing DMA map
		 * (assumes the map is the final member of sis_desc,
		 * per the sizeof arithmetic — confirm in the header). */
		bzero(&sc->sis_ldata->sis_rx_list[i],
		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
	}

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
		}
		bzero(&sc->sis_ldata->sis_tx_list[i],
		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
	}
}