/*	$OpenBSD: if_sis.c,v 1.81 2008/09/10 14:01:22 blambert Exp $ */
/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
 */

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815. Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size. The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
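 *
 * As far as the hardware is concerned, each of those descriptors is
 * just three longwords: a link to the next descriptor, a command/status
 * word (the OWN/OK/MORE bits plus the buffer or frame length) and a
 * buffer pointer. See struct sis_desc in if_sisreg.h for the
 * authoritative definition, which also carries the driver's own
 * bookkeeping fields (mbuf pointer, DMA map, software link).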
59 */ 60 61 #include "bpfilter.h" 62 63 #include <sys/param.h> 64 #include <sys/systm.h> 65 #include <sys/mbuf.h> 66 #include <sys/protosw.h> 67 #include <sys/socket.h> 68 #include <sys/ioctl.h> 69 #include <sys/errno.h> 70 #include <sys/malloc.h> 71 #include <sys/kernel.h> 72 #include <sys/timeout.h> 73 74 #include <net/if.h> 75 #include <net/if_dl.h> 76 #include <net/if_types.h> 77 78 #ifdef INET 79 #include <netinet/in.h> 80 #include <netinet/in_systm.h> 81 #include <netinet/in_var.h> 82 #include <netinet/ip.h> 83 #include <netinet/if_ether.h> 84 #endif 85 86 #include <net/if_media.h> 87 88 #if NBPFILTER > 0 89 #include <net/bpf.h> 90 #endif 91 92 #include <sys/device.h> 93 94 #include <dev/mii/mii.h> 95 #include <dev/mii/miivar.h> 96 97 #include <dev/pci/pcireg.h> 98 #include <dev/pci/pcivar.h> 99 #include <dev/pci/pcidevs.h> 100 101 #define SIS_USEIOSPACE 102 103 #include <dev/pci/if_sisreg.h> 104 105 int sis_probe(struct device *, void *, void *); 106 void sis_attach(struct device *, struct device *, void *); 107 108 struct cfattach sis_ca = { 109 sizeof(struct sis_softc), sis_probe, sis_attach 110 }; 111 112 struct cfdriver sis_cd = { 113 0, "sis", DV_IFNET 114 }; 115 116 int sis_intr(void *); 117 void sis_shutdown(void *); 118 int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *); 119 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *); 120 void sis_rxeof(struct sis_softc *); 121 void sis_rxeoc(struct sis_softc *); 122 void sis_txeof(struct sis_softc *); 123 void sis_tick(void *); 124 void sis_start(struct ifnet *); 125 int sis_ioctl(struct ifnet *, u_long, caddr_t); 126 void sis_init(void *); 127 void sis_stop(struct sis_softc *); 128 void sis_watchdog(struct ifnet *); 129 int sis_ifmedia_upd(struct ifnet *); 130 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *); 131 132 u_int16_t sis_reverse(u_int16_t); 133 void sis_delay(struct sis_softc *); 134 void sis_eeprom_idle(struct sis_softc *); 135 void sis_eeprom_putbyte(struct sis_softc *, int); 136 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *); 137 #if defined(__amd64__) || defined(__i386__) 138 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int); 139 #endif 140 void sis_read_mac(struct sis_softc *, struct pci_attach_args *); 141 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int); 142 void sis_read96x_mac(struct sis_softc *); 143 144 void sis_mii_sync(struct sis_softc *); 145 void sis_mii_send(struct sis_softc *, u_int32_t, int); 146 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *); 147 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *); 148 int sis_miibus_readreg(struct device *, int, int); 149 void sis_miibus_writereg(struct device *, int, int, int); 150 void sis_miibus_statchg(struct device *); 151 152 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *); 153 void sis_setmulti(struct sis_softc *); 154 void sis_setmulti_sis(struct sis_softc *); 155 void sis_setmulti_ns(struct sis_softc *); 156 void sis_setpromisc(struct sis_softc *); 157 void sis_reset(struct sis_softc *); 158 int sis_ring_init(struct sis_softc *); 159 160 #define SIS_SETBIT(sc, reg, x) \ 161 CSR_WRITE_4(sc, reg, \ 162 CSR_READ_4(sc, reg) | (x)) 163 164 #define SIS_CLRBIT(sc, reg, x) \ 165 CSR_WRITE_4(sc, reg, \ 166 CSR_READ_4(sc, reg) & ~(x)) 167 168 #define SIO_SET(x) \ 169 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x) 170 171 #define SIO_CLR(x) \ 172 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x) 173 174 const 
struct pci_matchid sis_devices[] = { 175 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 }, 176 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 }, 177 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 } 178 }; 179 180 /* 181 * Routine to reverse the bits in a word. Stolen almost 182 * verbatim from /usr/games/fortune. 183 */ 184 u_int16_t 185 sis_reverse(u_int16_t n) 186 { 187 n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa); 188 n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc); 189 n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0); 190 n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00); 191 192 return (n); 193 } 194 195 void 196 sis_delay(struct sis_softc *sc) 197 { 198 int idx; 199 200 for (idx = (300 / 33) + 1; idx > 0; idx--) 201 CSR_READ_4(sc, SIS_CSR); 202 } 203 204 void 205 sis_eeprom_idle(struct sis_softc *sc) 206 { 207 int i; 208 209 SIO_SET(SIS_EECTL_CSEL); 210 sis_delay(sc); 211 SIO_SET(SIS_EECTL_CLK); 212 sis_delay(sc); 213 214 for (i = 0; i < 25; i++) { 215 SIO_CLR(SIS_EECTL_CLK); 216 sis_delay(sc); 217 SIO_SET(SIS_EECTL_CLK); 218 sis_delay(sc); 219 } 220 221 SIO_CLR(SIS_EECTL_CLK); 222 sis_delay(sc); 223 SIO_CLR(SIS_EECTL_CSEL); 224 sis_delay(sc); 225 CSR_WRITE_4(sc, SIS_EECTL, 0x00000000); 226 } 227 228 /* 229 * Send a read command and address to the EEPROM, check for ACK. 230 */ 231 void 232 sis_eeprom_putbyte(struct sis_softc *sc, int addr) 233 { 234 int d, i; 235 236 d = addr | SIS_EECMD_READ; 237 238 /* 239 * Feed in each bit and strobe the clock. 240 */ 241 for (i = 0x400; i; i >>= 1) { 242 if (d & i) 243 SIO_SET(SIS_EECTL_DIN); 244 else 245 SIO_CLR(SIS_EECTL_DIN); 246 sis_delay(sc); 247 SIO_SET(SIS_EECTL_CLK); 248 sis_delay(sc); 249 SIO_CLR(SIS_EECTL_CLK); 250 sis_delay(sc); 251 } 252 } 253 254 /* 255 * Read a word of data stored in the EEPROM at address 'addr.' 256 */ 257 void 258 sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest) 259 { 260 int i; 261 u_int16_t word = 0; 262 263 /* Force EEPROM to idle state. */ 264 sis_eeprom_idle(sc); 265 266 /* Enter EEPROM access mode. */ 267 sis_delay(sc); 268 SIO_CLR(SIS_EECTL_CLK); 269 sis_delay(sc); 270 SIO_SET(SIS_EECTL_CSEL); 271 sis_delay(sc); 272 273 /* 274 * Send address of word we want to read. 275 */ 276 sis_eeprom_putbyte(sc, addr); 277 278 /* 279 * Start reading bits from EEPROM. 280 */ 281 for (i = 0x8000; i; i >>= 1) { 282 SIO_SET(SIS_EECTL_CLK); 283 sis_delay(sc); 284 if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT) 285 word |= i; 286 sis_delay(sc); 287 SIO_CLR(SIS_EECTL_CLK); 288 sis_delay(sc); 289 } 290 291 /* Turn off EEPROM access mode. */ 292 sis_eeprom_idle(sc); 293 294 *dest = word; 295 } 296 297 /* 298 * Read a sequence of words from the EEPROM. 
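 * The 'off' and 'cnt' arguments are counted in 16-bit words, not
 * bytes, and when 'swap' is non-zero each word is passed through
 * ntohs() before being stored at 'dest'.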
 */
void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
    int off, int cnt, int swap)
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		sis_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

#if defined(__amd64__) || defined(__i386__)
void
sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
    caddr_t dest, int off, int cnt)
{
	bus_space_tag_t		btag;
	u_int32_t		reg;
	int			i;

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);

#if defined(__amd64__)
	btag = X86_BUS_SPACE_IO;
#elif defined(__i386__)
	btag = I386_BUS_SPACE_IO;
#endif

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(btag, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
}
#endif

void
sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
{
	u_int16_t		*enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RELOAD);

	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	enaddr[0] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	enaddr[1] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	enaddr[2] = CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff;

	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
}

void
sis_read96x_mac(struct sis_softc *sc)
{
	int			i;

	SIO_SET(SIS96x_EECTL_REQ);

	for (i = 0; i < 2000; i++) {
		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 0);
			break;
		} else
			DELAY(1);
	}

	SIO_SET(SIS96x_EECTL_DONE);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
sis_mii_sync(struct sis_softc *sc)
{
	int			i;

	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
void
sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
{
	int			i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			SIO_SET(SIS_MII_DATA);
		else
			SIO_CLR(SIS_MII_DATA);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
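	 * This is the standard IEEE 802.3 clause 22 management frame:
	 * a 2-bit start delimiter, a 2-bit read opcode, the 5-bit PHY
	 * address and the 5-bit register address, after which the bus
	 * is turned around and the PHY drives 16 data bits back at us
	 * (read below via SIS_MII_DATA).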
450 */ 451 sis_mii_send(sc, frame->mii_stdelim, 2); 452 sis_mii_send(sc, frame->mii_opcode, 2); 453 sis_mii_send(sc, frame->mii_phyaddr, 5); 454 sis_mii_send(sc, frame->mii_regaddr, 5); 455 456 /* Idle bit */ 457 SIO_CLR((SIS_MII_CLK|SIS_MII_DATA)); 458 DELAY(1); 459 SIO_SET(SIS_MII_CLK); 460 DELAY(1); 461 462 /* Turn off xmit. */ 463 SIO_CLR(SIS_MII_DIR); 464 465 /* Check for ack */ 466 SIO_CLR(SIS_MII_CLK); 467 DELAY(1); 468 ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA; 469 SIO_SET(SIS_MII_CLK); 470 DELAY(1); 471 472 /* 473 * Now try reading data bits. If the ack failed, we still 474 * need to clock through 16 cycles to keep the PHY(s) in sync. 475 */ 476 if (ack) { 477 for(i = 0; i < 16; i++) { 478 SIO_CLR(SIS_MII_CLK); 479 DELAY(1); 480 SIO_SET(SIS_MII_CLK); 481 DELAY(1); 482 } 483 goto fail; 484 } 485 486 for (i = 0x8000; i; i >>= 1) { 487 SIO_CLR(SIS_MII_CLK); 488 DELAY(1); 489 if (!ack) { 490 if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA) 491 frame->mii_data |= i; 492 DELAY(1); 493 } 494 SIO_SET(SIS_MII_CLK); 495 DELAY(1); 496 } 497 498 fail: 499 500 SIO_CLR(SIS_MII_CLK); 501 DELAY(1); 502 SIO_SET(SIS_MII_CLK); 503 DELAY(1); 504 505 splx(s); 506 507 if (ack) 508 return (1); 509 return (0); 510 } 511 512 /* 513 * Write to a PHY register through the MII. 514 */ 515 int 516 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame) 517 { 518 int s; 519 520 s = splnet(); 521 /* 522 * Set up frame for TX. 523 */ 524 525 frame->mii_stdelim = SIS_MII_STARTDELIM; 526 frame->mii_opcode = SIS_MII_WRITEOP; 527 frame->mii_turnaround = SIS_MII_TURNAROUND; 528 529 /* 530 * Turn on data output. 531 */ 532 SIO_SET(SIS_MII_DIR); 533 534 sis_mii_sync(sc); 535 536 sis_mii_send(sc, frame->mii_stdelim, 2); 537 sis_mii_send(sc, frame->mii_opcode, 2); 538 sis_mii_send(sc, frame->mii_phyaddr, 5); 539 sis_mii_send(sc, frame->mii_regaddr, 5); 540 sis_mii_send(sc, frame->mii_turnaround, 2); 541 sis_mii_send(sc, frame->mii_data, 16); 542 543 /* Idle bit. */ 544 SIO_SET(SIS_MII_CLK); 545 DELAY(1); 546 SIO_CLR(SIS_MII_CLK); 547 DELAY(1); 548 549 /* 550 * Turn off xmit. 551 */ 552 SIO_CLR(SIS_MII_DIR); 553 554 splx(s); 555 556 return (0); 557 } 558 559 int 560 sis_miibus_readreg(struct device *self, int phy, int reg) 561 { 562 struct sis_softc *sc = (struct sis_softc *)self; 563 struct sis_mii_frame frame; 564 565 if (sc->sis_type == SIS_TYPE_83815) { 566 if (phy != 0) 567 return (0); 568 /* 569 * The NatSemi chip can take a while after 570 * a reset to come ready, during which the BMSR 571 * returns a value of 0. This is *never* supposed 572 * to happen: some of the BMSR bits are meant to 573 * be hardwired in the on position, and this can 574 * confuse the miibus code a bit during the probe 575 * and attach phase. So we make an effort to check 576 * for this condition and wait for it to clear. 577 */ 578 if (!CSR_READ_4(sc, NS_BMSR)) 579 DELAY(1000); 580 return CSR_READ_4(sc, NS_BMCR + (reg * 4)); 581 } 582 583 /* 584 * Chipsets < SIS_635 seem not to be able to read/write 585 * through mdio. Use the enhanced PHY access register 586 * again for them. 
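	 * A register read through that interface works by writing the
	 * PHY and register numbers together with SIS_PHYOP_READ into
	 * SIS_PHYCTL, setting SIS_PHYCTL_ACCESS and polling until the
	 * chip clears the access bit again; the result is then held in
	 * the upper 16 bits of SIS_PHYCTL.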
587 */ 588 if (sc->sis_type == SIS_TYPE_900 && 589 sc->sis_rev < SIS_REV_635) { 590 int i, val = 0; 591 592 if (phy != 0) 593 return (0); 594 595 CSR_WRITE_4(sc, SIS_PHYCTL, 596 (phy << 11) | (reg << 6) | SIS_PHYOP_READ); 597 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); 598 599 for (i = 0; i < SIS_TIMEOUT; i++) { 600 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) 601 break; 602 } 603 604 if (i == SIS_TIMEOUT) { 605 printf("%s: PHY failed to come ready\n", 606 sc->sc_dev.dv_xname); 607 return (0); 608 } 609 610 val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF; 611 612 if (val == 0xFFFF) 613 return (0); 614 615 return (val); 616 } else { 617 bzero((char *)&frame, sizeof(frame)); 618 619 frame.mii_phyaddr = phy; 620 frame.mii_regaddr = reg; 621 sis_mii_readreg(sc, &frame); 622 623 return (frame.mii_data); 624 } 625 } 626 627 void 628 sis_miibus_writereg(struct device *self, int phy, int reg, int data) 629 { 630 struct sis_softc *sc = (struct sis_softc *)self; 631 struct sis_mii_frame frame; 632 633 if (sc->sis_type == SIS_TYPE_83815) { 634 if (phy != 0) 635 return; 636 CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data); 637 return; 638 } 639 640 /* 641 * Chipsets < SIS_635 seem not to be able to read/write 642 * through mdio. Use the enhanced PHY access register 643 * again for them. 644 */ 645 if (sc->sis_type == SIS_TYPE_900 && 646 sc->sis_rev < SIS_REV_635) { 647 int i; 648 649 if (phy != 0) 650 return; 651 652 CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) | 653 (reg << 6) | SIS_PHYOP_WRITE); 654 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); 655 656 for (i = 0; i < SIS_TIMEOUT; i++) { 657 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) 658 break; 659 } 660 661 if (i == SIS_TIMEOUT) 662 printf("%s: PHY failed to come ready\n", 663 sc->sc_dev.dv_xname); 664 } else { 665 bzero((char *)&frame, sizeof(frame)); 666 667 frame.mii_phyaddr = phy; 668 frame.mii_regaddr = reg; 669 frame.mii_data = data; 670 sis_mii_writereg(sc, &frame); 671 } 672 } 673 674 void 675 sis_miibus_statchg(struct device *self) 676 { 677 struct sis_softc *sc = (struct sis_softc *)self; 678 679 sis_init(sc); 680 } 681 682 u_int32_t 683 sis_mchash(struct sis_softc *sc, const uint8_t *addr) 684 { 685 uint32_t crc; 686 687 /* Compute CRC for the address value. */ 688 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 689 690 /* 691 * return the filter bit position 692 * 693 * The NatSemi chip has a 512-bit filter, which is 694 * different than the SiS, so we special-case it. 
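	 * In all cases the hash is just the top few bits of the
	 * big-endian CRC32 of the address: 9 bits (crc >> 23) index the
	 * NatSemi's 512-bit table, 8 bits (crc >> 24) the 256-bit table
	 * on SiS 900B/635 and newer, and 7 bits (crc >> 25) the 128-bit
	 * table on the older SiS parts.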
695 */ 696 if (sc->sis_type == SIS_TYPE_83815) 697 return (crc >> 23); 698 else if (sc->sis_rev >= SIS_REV_635 || 699 sc->sis_rev == SIS_REV_900B) 700 return (crc >> 24); 701 else 702 return (crc >> 25); 703 } 704 705 void 706 sis_setmulti(struct sis_softc *sc) 707 { 708 if (sc->sis_type == SIS_TYPE_83815) 709 sis_setmulti_ns(sc); 710 else 711 sis_setmulti_sis(sc); 712 } 713 714 void 715 sis_setmulti_ns(struct sis_softc *sc) 716 { 717 struct ifnet *ifp; 718 struct arpcom *ac = &sc->arpcom; 719 struct ether_multi *enm; 720 struct ether_multistep step; 721 u_int32_t h = 0, i, filtsave; 722 int bit, index; 723 724 ifp = &sc->arpcom.ac_if; 725 726 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 727 allmulti: 728 SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); 729 SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); 730 return; 731 } 732 733 ETHER_FIRST_MULTI(step, ac, enm); 734 while (enm != NULL) { 735 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 736 ifp->if_flags |= IFF_ALLMULTI; 737 goto allmulti; 738 } 739 ETHER_NEXT_MULTI(step, enm); 740 } 741 742 /* 743 * We have to explicitly enable the multicast hash table 744 * on the NatSemi chip if we want to use it, which we do. 745 */ 746 SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); 747 SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); 748 749 filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); 750 751 /* first, zot all the existing hash bits */ 752 for (i = 0; i < 32; i++) { 753 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2)); 754 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0); 755 } 756 757 ETHER_FIRST_MULTI(step, ac, enm); 758 while (enm != NULL) { 759 h = sis_mchash(sc, enm->enm_addrlo); 760 index = h >> 3; 761 bit = h & 0x1F; 762 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index); 763 if (bit > 0xF) 764 bit -= 0x10; 765 SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit)); 766 ETHER_NEXT_MULTI(step, enm); 767 } 768 769 CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); 770 } 771 772 void 773 sis_setmulti_sis(struct sis_softc *sc) 774 { 775 struct ifnet *ifp; 776 struct arpcom *ac = &sc->arpcom; 777 struct ether_multi *enm; 778 struct ether_multistep step; 779 u_int32_t h, i, n, ctl; 780 u_int16_t hashes[16]; 781 782 ifp = &sc->arpcom.ac_if; 783 784 /* hash table size */ 785 if (sc->sis_rev >= SIS_REV_635 || 786 sc->sis_rev == SIS_REV_900B) 787 n = 16; 788 else 789 n = 8; 790 791 ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE; 792 793 if (ifp->if_flags & IFF_BROADCAST) 794 ctl |= SIS_RXFILTCTL_BROAD; 795 796 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 797 allmulti: 798 ctl |= SIS_RXFILTCTL_ALLMULTI; 799 if (ifp->if_flags & IFF_PROMISC) 800 ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS; 801 for (i = 0; i < n; i++) 802 hashes[i] = ~0; 803 } else { 804 for (i = 0; i < n; i++) 805 hashes[i] = 0; 806 i = 0; 807 ETHER_FIRST_MULTI(step, ac, enm); 808 while (enm != NULL) { 809 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 810 ifp->if_flags |= IFF_ALLMULTI; 811 goto allmulti; 812 } 813 814 h = sis_mchash(sc, enm->enm_addrlo); 815 hashes[h >> 4] |= 1 << (h & 0xf); 816 i++; 817 ETHER_NEXT_MULTI(step, enm); 818 } 819 if (i > n) { 820 ctl |= SIS_RXFILTCTL_ALLMULTI; 821 for (i = 0; i < n; i++) 822 hashes[i] = ~0; 823 } 824 } 825 826 for (i = 0; i < n; i++) { 827 CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16); 828 CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]); 829 } 830 831 CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl); 832 } 833 834 void 835 sis_setpromisc(struct 
sis_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	else
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
}

void
sis_reset(struct sis_softc *sc)
{
	int			i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
sis_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
	    sizeof(sis_devices)/sizeof(sis_devices[0])));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
sis_attach(struct device *parent, struct device *self, void *aux)
{
	int			i;
	const char		*intrstr = NULL;
	pcireg_t		command;
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	struct ifnet		*ifp;
	bus_size_t		size;

	sc->sis_stopped = 1;

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL);
		if (command & SIS_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SIS_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode -- setting to D0\n",
			    sc->sc_dev.dv_xname, command & SIS_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
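	 * SIS_USEIOSPACE (defined above, before if_sisreg.h is pulled
	 * in) selects the I/O space BAR at SIS_PCI_LOIO; leaving it
	 * undefined maps the memory space BAR at SIS_PCI_LOMEM instead.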
	 */

#ifdef SIS_USEIOSPACE
	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SIS_900:
		sc->sis_type = SIS_TYPE_900;
		break;
	case PCI_PRODUCT_SIS_7016:
		sc->sis_type = SIS_TYPE_7016;
		break;
	case PCI_PRODUCT_NS_DP83815:
		sc->sis_type = SIS_TYPE_83815;
		break;
	default:
		break;
	}
	sc->sis_rev = PCI_REVISION(pa->pa_class);

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_NS:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		if (sc->sis_srr == NS_SRR_15C)
			printf(", DP83815C");
		else if (sc->sis_srr == NS_SRR_15D)
			printf(", DP83815D");
		else if (sc->sis_srr == NS_SRR_16A)
			printf(", DP83816A");
		else
			printf(", srr %x", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t		tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,4,0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN);
		}
		break;
	case PCI_VENDOR_SIS:
	default:
#if defined(__amd64__) || defined(__i386__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 and amd64 architectures since the SiS 630E
		 * chipset is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack.
These are 1054 * taken from SiS's Linux driver. I'd like to replace 1055 * them with proper symbolic definitions, but that 1056 * requires some datasheets that I don't have access 1057 * to at the moment. 1058 */ 1059 if (sc->sis_rev == SIS_REV_630S || 1060 sc->sis_rev == SIS_REV_630E) 1061 sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr, 1062 0x9, 6); 1063 else 1064 #endif 1065 if (sc->sis_rev == SIS_REV_96x) 1066 sis_read96x_mac(sc); 1067 else if (sc->sis_rev == SIS_REV_635 || 1068 sc->sis_rev == SIS_REV_630ET || 1069 sc->sis_rev == SIS_REV_630EA1) 1070 sis_read_mac(sc, pa); 1071 else 1072 sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1073 SIS_EE_NODEADDR, 3, 0); 1074 break; 1075 } 1076 1077 printf(": %s, address %s\n", intrstr, 1078 ether_sprintf(sc->arpcom.ac_enaddr)); 1079 1080 sc->sc_dmat = pa->pa_dmat; 1081 1082 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data), 1083 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg, 1084 BUS_DMA_NOWAIT) != 0) { 1085 printf(": can't alloc list mem\n"); 1086 goto fail_2; 1087 } 1088 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg, 1089 sizeof(struct sis_list_data), &sc->sc_listkva, 1090 BUS_DMA_NOWAIT) != 0) { 1091 printf(": can't map list mem\n"); 1092 goto fail_2; 1093 } 1094 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1, 1095 sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT, 1096 &sc->sc_listmap) != 0) { 1097 printf(": can't alloc list map\n"); 1098 goto fail_2; 1099 } 1100 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva, 1101 sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) { 1102 printf(": can't load list map\n"); 1103 goto fail_2; 1104 } 1105 sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva; 1106 bzero(sc->sis_ldata, sizeof(struct sis_list_data)); 1107 1108 for (i = 0; i < SIS_RX_LIST_CNT_MAX; i++) { 1109 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1110 BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) { 1111 printf(": can't create rx map\n"); 1112 goto fail_2; 1113 } 1114 } 1115 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1116 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) { 1117 printf(": can't create rx spare map\n"); 1118 goto fail_2; 1119 } 1120 1121 for (i = 0; i < SIS_TX_LIST_CNT; i++) { 1122 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1123 SIS_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT, 1124 &sc->sis_ldata->sis_tx_list[i].map) != 0) { 1125 printf(": can't create tx map\n"); 1126 goto fail_2; 1127 } 1128 } 1129 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_TX_LIST_CNT - 3, 1130 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) { 1131 printf(": can't create tx spare map\n"); 1132 goto fail_2; 1133 } 1134 1135 timeout_set(&sc->sis_timeout, sis_tick, sc); 1136 1137 ifp = &sc->arpcom.ac_if; 1138 ifp->if_softc = sc; 1139 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1140 ifp->if_ioctl = sis_ioctl; 1141 ifp->if_start = sis_start; 1142 ifp->if_watchdog = sis_watchdog; 1143 ifp->if_baudrate = 10000000; 1144 IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1); 1145 IFQ_SET_READY(&ifp->if_snd); 1146 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1147 1148 ifp->if_capabilities = IFCAP_VLAN_MTU; 1149 1150 sc->sc_mii.mii_ifp = ifp; 1151 sc->sc_mii.mii_readreg = sis_miibus_readreg; 1152 sc->sc_mii.mii_writereg = sis_miibus_writereg; 1153 sc->sc_mii.mii_statchg = sis_miibus_statchg; 1154 ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts); 1155 mii_attach(self, &sc->sc_mii, 
0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 1156 0); 1157 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1158 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 1159 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 1160 } else 1161 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1162 1163 /* 1164 * Call MI attach routines. 1165 */ 1166 if_attach(ifp); 1167 ether_ifattach(ifp); 1168 1169 shutdownhook_establish(sis_shutdown, sc); 1170 return; 1171 1172 fail_2: 1173 pci_intr_disestablish(pc, sc->sc_ih); 1174 1175 fail_1: 1176 bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size); 1177 } 1178 1179 /* 1180 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that 1181 * we arrange the descriptors in a closed ring, so that the last descriptor 1182 * points back to the first. 1183 */ 1184 int 1185 sis_ring_init(struct sis_softc *sc) 1186 { 1187 struct sis_list_data *ld; 1188 struct sis_ring_data *cd; 1189 int i, error, nexti; 1190 1191 cd = &sc->sis_cdata; 1192 ld = sc->sis_ldata; 1193 1194 for (i = 0; i < SIS_TX_LIST_CNT; i++) { 1195 if (i == (SIS_TX_LIST_CNT - 1)) 1196 nexti = 0; 1197 else 1198 nexti = i + 1; 1199 ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti]; 1200 ld->sis_tx_list[i].sis_next = sc->sc_listmap->dm_segs[0].ds_addr + 1201 offsetof(struct sis_list_data, sis_tx_list[nexti]); 1202 ld->sis_tx_list[i].sis_mbuf = NULL; 1203 ld->sis_tx_list[i].sis_ptr = 0; 1204 ld->sis_tx_list[i].sis_ctl = 0; 1205 } 1206 1207 cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0; 1208 1209 if (sc->arpcom.ac_if.if_flags & IFF_UP) 1210 sc->sc_rxbufs = SIS_RX_LIST_CNT_MAX; 1211 else 1212 sc->sc_rxbufs = SIS_RX_LIST_CNT_MIN; 1213 1214 for (i = 0; i < sc->sc_rxbufs; i++) { 1215 error = sis_newbuf(sc, &ld->sis_rx_list[i], NULL); 1216 if (error) 1217 return (error); 1218 if (i == (sc->sc_rxbufs - 1)) 1219 nexti = 0; 1220 else 1221 nexti = i + 1; 1222 ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti]; 1223 ld->sis_rx_list[i].sis_next = sc->sc_listmap->dm_segs[0].ds_addr + 1224 offsetof(struct sis_list_data, sis_rx_list[nexti]); 1225 } 1226 1227 cd->sis_rx_pdsc = &ld->sis_rx_list[0]; 1228 1229 return (0); 1230 } 1231 1232 /* 1233 * Initialize an RX descriptor and attach an MBUF cluster. 
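 * When 'm' is NULL a fresh cluster is allocated; otherwise the caller's
 * mbuf (e.g. the one sis_rxeof() hands back on a receive error) is
 * simply recycled. The new mbuf is loaded into the spare DMA map kept
 * in the softc and the maps are swapped only on success, so the
 * descriptor's existing mapping is never torn down by a failed load.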
1234 */ 1235 int 1236 sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m) 1237 { 1238 struct mbuf *m_new = NULL; 1239 bus_dmamap_t map; 1240 1241 if (c == NULL) 1242 return (EINVAL); 1243 1244 if (m == NULL) { 1245 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1246 if (m_new == NULL) 1247 return (ENOBUFS); 1248 1249 MCLGET(m_new, M_DONTWAIT); 1250 if (!(m_new->m_flags & M_EXT)) { 1251 m_freem(m_new); 1252 return (ENOBUFS); 1253 } 1254 } else { 1255 m_new = m; 1256 m_new->m_data = m_new->m_ext.ext_buf; 1257 } 1258 1259 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1260 1261 if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap, m_new, 1262 BUS_DMA_NOWAIT)) { 1263 m_freem(m_new); 1264 return (ENOBUFS); 1265 } 1266 1267 map = c->map; 1268 c->map = sc->sc_rx_sparemap; 1269 sc->sc_rx_sparemap = map; 1270 1271 bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize, 1272 BUS_DMASYNC_PREREAD); 1273 1274 c->sis_mbuf = m_new; 1275 c->sis_ptr = c->map->dm_segs[0].ds_addr; 1276 c->sis_ctl = ETHER_MAX_DIX_LEN; 1277 1278 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1279 ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc), 1280 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1281 1282 return (0); 1283 } 1284 1285 /* 1286 * A frame has been uploaded: pass the resulting mbuf chain up to 1287 * the higher level protocols. 1288 */ 1289 void 1290 sis_rxeof(struct sis_softc *sc) 1291 { 1292 struct mbuf *m; 1293 struct ifnet *ifp; 1294 struct sis_desc *cur_rx; 1295 int total_len = 0; 1296 u_int32_t rxstat; 1297 1298 ifp = &sc->arpcom.ac_if; 1299 1300 for(cur_rx = sc->sis_cdata.sis_rx_pdsc; SIS_OWNDESC(cur_rx); 1301 cur_rx = cur_rx->sis_nextdesc) { 1302 1303 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1304 ((caddr_t)cur_rx - sc->sc_listkva), 1305 sizeof(struct sis_desc), 1306 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1307 1308 rxstat = cur_rx->sis_rxstat; 1309 m = cur_rx->sis_mbuf; 1310 cur_rx->sis_mbuf = NULL; 1311 total_len = SIS_RXBYTES(cur_rx); 1312 1313 /* 1314 * If an error occurs, update stats, clear the 1315 * status word and leave the mbuf cluster in place: 1316 * it should simply get re-used next time this descriptor 1317 * comes up in the ring. 1318 */ 1319 if (!(rxstat & SIS_CMDSTS_PKT_OK)) { 1320 ifp->if_ierrors++; 1321 if (rxstat & SIS_RXSTAT_COLL) 1322 ifp->if_collisions++; 1323 sis_newbuf(sc, cur_rx, m); 1324 continue; 1325 } 1326 1327 /* No errors; receive the packet. */ 1328 bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0, 1329 cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1330 #ifndef __STRICT_ALIGNMENT 1331 /* 1332 * On some architectures, we do not have alignment problems, 1333 * so try to allocate a new buffer for the receive ring, and 1334 * pass up the one where the packet is already, saving the 1335 * expensive copy done in m_devget(). 1336 * If we are on an architecture with alignment problems, or 1337 * if the allocation fails, then use m_devget and leave the 1338 * existing buffer in the receive ring. 
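		 * ETHER_ALIGN here is the usual two-byte fudge that keeps
		 * the IP header longword-aligned once the 14-byte Ethernet
		 * header has been stripped.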
1339 */ 1340 if (sis_newbuf(sc, cur_rx, NULL) == 0) { 1341 m->m_pkthdr.rcvif = ifp; 1342 m->m_pkthdr.len = m->m_len = total_len; 1343 } else 1344 #endif 1345 { 1346 struct mbuf *m0; 1347 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 1348 total_len + ETHER_ALIGN, 0, ifp, NULL); 1349 sis_newbuf(sc, cur_rx, m); 1350 if (m0 == NULL) { 1351 ifp->if_ierrors++; 1352 continue; 1353 } 1354 m_adj(m0, ETHER_ALIGN); 1355 m = m0; 1356 } 1357 1358 ifp->if_ipackets++; 1359 1360 #if NBPFILTER > 0 1361 if (ifp->if_bpf) 1362 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 1363 #endif 1364 1365 /* pass it on. */ 1366 ether_input_mbuf(ifp, m); 1367 } 1368 1369 sc->sis_cdata.sis_rx_pdsc = cur_rx; 1370 } 1371 1372 void 1373 sis_rxeoc(struct sis_softc *sc) 1374 { 1375 sis_rxeof(sc); 1376 sis_init(sc); 1377 } 1378 1379 /* 1380 * A frame was downloaded to the chip. It's safe for us to clean up 1381 * the list buffers. 1382 */ 1383 1384 void 1385 sis_txeof(struct sis_softc *sc) 1386 { 1387 struct ifnet *ifp; 1388 u_int32_t idx; 1389 1390 ifp = &sc->arpcom.ac_if; 1391 1392 /* 1393 * Go through our tx list and free mbufs for those 1394 * frames that have been transmitted. 1395 */ 1396 for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0; 1397 sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) { 1398 struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx]; 1399 1400 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1401 ((caddr_t)cur_tx - sc->sc_listkva), 1402 sizeof(struct sis_desc), 1403 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1404 1405 if (SIS_OWNDESC(cur_tx)) 1406 break; 1407 1408 if (cur_tx->sis_ctl & SIS_CMDSTS_MORE) 1409 continue; 1410 1411 if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) { 1412 ifp->if_oerrors++; 1413 if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS) 1414 ifp->if_collisions++; 1415 if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL) 1416 ifp->if_collisions++; 1417 } 1418 1419 ifp->if_collisions += 1420 (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16; 1421 1422 ifp->if_opackets++; 1423 if (cur_tx->map->dm_nsegs != 0) { 1424 bus_dmamap_t map = cur_tx->map; 1425 1426 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1427 BUS_DMASYNC_POSTWRITE); 1428 bus_dmamap_unload(sc->sc_dmat, map); 1429 } 1430 if (cur_tx->sis_mbuf != NULL) { 1431 m_freem(cur_tx->sis_mbuf); 1432 cur_tx->sis_mbuf = NULL; 1433 } 1434 } 1435 1436 if (idx != sc->sis_cdata.sis_tx_cons) { 1437 /* we freed up some buffers */ 1438 sc->sis_cdata.sis_tx_cons = idx; 1439 ifp->if_flags &= ~IFF_OACTIVE; 1440 } 1441 1442 ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5; 1443 } 1444 1445 void 1446 sis_tick(void *xsc) 1447 { 1448 struct sis_softc *sc = (struct sis_softc *)xsc; 1449 struct mii_data *mii; 1450 struct ifnet *ifp; 1451 int s; 1452 1453 s = splnet(); 1454 1455 ifp = &sc->arpcom.ac_if; 1456 1457 mii = &sc->sc_mii; 1458 mii_tick(mii); 1459 1460 if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE && 1461 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1462 sc->sis_link++; 1463 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1464 sis_start(ifp); 1465 } 1466 timeout_add_sec(&sc->sis_timeout, 1); 1467 1468 splx(s); 1469 } 1470 1471 int 1472 sis_intr(void *arg) 1473 { 1474 struct sis_softc *sc; 1475 struct ifnet *ifp; 1476 u_int32_t status; 1477 int claimed = 0; 1478 1479 sc = arg; 1480 ifp = &sc->arpcom.ac_if; 1481 1482 if (sc->sis_stopped) /* Most likely shared interrupt */ 1483 return (claimed); 1484 1485 /* Disable interrupts. 
*/ 1486 CSR_WRITE_4(sc, SIS_IER, 0); 1487 1488 for (;;) { 1489 /* Reading the ISR register clears all interrupts. */ 1490 status = CSR_READ_4(sc, SIS_ISR); 1491 1492 if ((status & SIS_INTRS) == 0) 1493 break; 1494 1495 claimed = 1; 1496 1497 if (status & 1498 (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR | 1499 SIS_ISR_TX_OK | SIS_ISR_TX_IDLE)) 1500 sis_txeof(sc); 1501 1502 if (status & 1503 (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK | 1504 SIS_ISR_RX_IDLE)) 1505 sis_rxeof(sc); 1506 1507 if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW)) 1508 sis_rxeoc(sc); 1509 1510 #if 0 1511 if (status & (SIS_ISR_RX_IDLE)) 1512 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 1513 #endif 1514 1515 if (status & SIS_ISR_SYSERR) { 1516 sis_reset(sc); 1517 sis_init(sc); 1518 } 1519 } 1520 1521 /* Re-enable interrupts. */ 1522 CSR_WRITE_4(sc, SIS_IER, 1); 1523 1524 /* 1525 * XXX: Re-enable RX engine every time otherwise it occasionally 1526 * stops under unknown circumstances. 1527 */ 1528 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 1529 1530 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1531 sis_start(ifp); 1532 1533 return (claimed); 1534 } 1535 1536 /* 1537 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1538 * pointers to the fragment pointers. 1539 */ 1540 int 1541 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 1542 { 1543 struct sis_desc *f = NULL; 1544 int frag, cur, i; 1545 bus_dmamap_t map; 1546 1547 map = sc->sc_tx_sparemap; 1548 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, 1549 m_head, BUS_DMA_NOWAIT) != 0) 1550 return (ENOBUFS); 1551 1552 /* 1553 * Start packing the mbufs in this chain into 1554 * the fragment pointers. Stop when we run out 1555 * of fragments or hit the end of the mbuf chain. 1556 */ 1557 cur = frag = *txidx; 1558 1559 for (i = 0; i < map->dm_nsegs; i++) { 1560 if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + i)) < 2) 1561 return(ENOBUFS); 1562 f = &sc->sis_ldata->sis_tx_list[frag]; 1563 f->sis_ctl = SIS_CMDSTS_MORE | map->dm_segs[i].ds_len; 1564 f->sis_ptr = map->dm_segs[i].ds_addr; 1565 if (i != 0) 1566 f->sis_ctl |= SIS_CMDSTS_OWN; 1567 cur = frag; 1568 SIS_INC(frag, SIS_TX_LIST_CNT); 1569 } 1570 1571 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1572 BUS_DMASYNC_PREWRITE); 1573 1574 sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head; 1575 sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE; 1576 sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN; 1577 sc->sis_cdata.sis_tx_cnt += i; 1578 *txidx = frag; 1579 1580 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 1581 offsetof(struct sis_list_data, sis_tx_list[0]), 1582 sizeof(struct sis_desc) * SIS_TX_LIST_CNT, 1583 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1584 1585 return (0); 1586 } 1587 1588 /* 1589 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1590 * to the mbuf data regions directly in the transmit lists. We also save a 1591 * copy of the pointers since the transmit list fragment pointers are 1592 * physical addresses. 
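 * sis_encap() always keeps a couple of descriptors in reserve; if it
 * cannot place a packet, IFF_OACTIVE is set and the packet is left on
 * the send queue (we only IFQ_DEQUEUE once the mapping has succeeded),
 * to be retried after sis_txeof() has freed some descriptors.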
1593 */ 1594 1595 void 1596 sis_start(struct ifnet *ifp) 1597 { 1598 struct sis_softc *sc; 1599 struct mbuf *m_head = NULL; 1600 u_int32_t idx, queued = 0; 1601 1602 sc = ifp->if_softc; 1603 1604 if (!sc->sis_link) 1605 return; 1606 1607 idx = sc->sis_cdata.sis_tx_prod; 1608 1609 if (ifp->if_flags & IFF_OACTIVE) 1610 return; 1611 1612 while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) { 1613 IFQ_POLL(&ifp->if_snd, m_head); 1614 if (m_head == NULL) 1615 break; 1616 1617 if (sis_encap(sc, m_head, &idx)) { 1618 ifp->if_flags |= IFF_OACTIVE; 1619 break; 1620 } 1621 1622 /* now we are committed to transmit the packet */ 1623 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1624 1625 queued++; 1626 1627 /* 1628 * If there's a BPF listener, bounce a copy of this frame 1629 * to him. 1630 */ 1631 #if NBPFILTER > 0 1632 if (ifp->if_bpf) 1633 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1634 #endif 1635 } 1636 1637 if (queued) { 1638 /* Transmit */ 1639 sc->sis_cdata.sis_tx_prod = idx; 1640 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE); 1641 1642 /* 1643 * Set a timeout in case the chip goes out to lunch. 1644 */ 1645 ifp->if_timer = 5; 1646 } 1647 } 1648 1649 void 1650 sis_init(void *xsc) 1651 { 1652 struct sis_softc *sc = (struct sis_softc *)xsc; 1653 struct ifnet *ifp = &sc->arpcom.ac_if; 1654 struct mii_data *mii; 1655 int s; 1656 1657 s = splnet(); 1658 1659 /* 1660 * Cancel pending I/O and free all RX/TX buffers. 1661 */ 1662 sis_stop(sc); 1663 1664 #if NS_IHR_DELAY > 0 1665 /* Configure interrupt holdoff register. */ 1666 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A) 1667 CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE); 1668 #endif 1669 1670 mii = &sc->sc_mii; 1671 1672 /* Set MAC address */ 1673 if (sc->sis_type == SIS_TYPE_83815) { 1674 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0); 1675 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1676 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1677 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1); 1678 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1679 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1680 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2); 1681 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1682 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1683 } else { 1684 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); 1685 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1686 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1687 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1); 1688 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1689 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1690 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); 1691 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1692 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1693 } 1694 1695 /* Init circular TX/RX lists. */ 1696 if (sis_ring_init(sc) != 0) { 1697 printf("%s: initialization failed: no memory for rx buffers\n", 1698 sc->sc_dev.dv_xname); 1699 sis_stop(sc); 1700 splx(s); 1701 return; 1702 } 1703 1704 /* 1705 * Short Cable Receive Errors (MP21.E) 1706 * also: Page 78 of the DP83815 data sheet (september 2002 version) 1707 * recommends the following register settings "for optimum 1708 * performance." for rev 15C. The driver from NS also sets 1709 * the PHY_CR register for later versions. 
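	 * A related DSPCFG workaround for the same MP21.E erratum is
	 * applied again later in this function for pre-16A parts whose
	 * negotiated media is 100baseTX.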
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		if (sc->sis_srr == NS_SRR_15C) {
			/* set val for c2 */
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
			/* load/kill c2 */
			CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
			/* raise SD off, from 4 to c */
			CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	else
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);

	/* Set promiscuous mode. */
	sis_setpromisc(sc);

	/*
	 * Load the multicast filter.
	 */
	sis_setmulti(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_rx_list[0]));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_tx_list[0]));

	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	else
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	else
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
1795 * Also page 49 in 83816 manual 1796 */ 1797 SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D); 1798 } 1799 1800 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A && 1801 IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 1802 uint32_t reg; 1803 1804 /* 1805 * Short Cable Receive Errors (MP21.E) 1806 */ 1807 CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); 1808 reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff; 1809 CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000); 1810 DELAY(100000); 1811 reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff; 1812 if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) { 1813 #ifdef DEBUG 1814 printf("%s: Applying short cable fix (reg=%x)\n", 1815 sc->sc_dev.dv_xname, reg); 1816 #endif 1817 CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8); 1818 reg = CSR_READ_4(sc, NS_PHY_DSPCFG); 1819 SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20); 1820 } 1821 CSR_WRITE_4(sc, NS_PHY_PAGE, 0); 1822 } 1823 1824 /* 1825 * Enable interrupts. 1826 */ 1827 CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS); 1828 CSR_WRITE_4(sc, SIS_IER, 1); 1829 1830 /* Enable receiver and transmitter. */ 1831 SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); 1832 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 1833 1834 #ifdef notdef 1835 mii_mediachg(mii); 1836 #endif 1837 1838 sc->sis_stopped = 0; 1839 ifp->if_flags |= IFF_RUNNING; 1840 ifp->if_flags &= ~IFF_OACTIVE; 1841 1842 splx(s); 1843 1844 timeout_add_sec(&sc->sis_timeout, 1); 1845 } 1846 1847 /* 1848 * Set media options. 1849 */ 1850 int 1851 sis_ifmedia_upd(struct ifnet *ifp) 1852 { 1853 struct sis_softc *sc; 1854 struct mii_data *mii; 1855 1856 sc = ifp->if_softc; 1857 1858 mii = &sc->sc_mii; 1859 sc->sis_link = 0; 1860 if (mii->mii_instance) { 1861 struct mii_softc *miisc; 1862 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1863 mii_phy_reset(miisc); 1864 } 1865 mii_mediachg(mii); 1866 1867 return (0); 1868 } 1869 1870 /* 1871 * Report current media status. 
1872 */ 1873 void 1874 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1875 { 1876 struct sis_softc *sc; 1877 struct mii_data *mii; 1878 1879 sc = ifp->if_softc; 1880 1881 mii = &sc->sc_mii; 1882 mii_pollstat(mii); 1883 ifmr->ifm_active = mii->mii_media_active; 1884 ifmr->ifm_status = mii->mii_media_status; 1885 } 1886 1887 int 1888 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1889 { 1890 struct sis_softc *sc = ifp->if_softc; 1891 struct ifreq *ifr = (struct ifreq *) data; 1892 struct ifaddr *ifa = (struct ifaddr *)data; 1893 struct mii_data *mii; 1894 int s, error = 0; 1895 1896 s = splnet(); 1897 1898 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 1899 splx(s); 1900 return error; 1901 } 1902 1903 switch(command) { 1904 case SIOCSIFADDR: 1905 ifp->if_flags |= IFF_UP; 1906 if (!(ifp->if_flags & IFF_RUNNING)) 1907 sis_init(sc); 1908 #ifdef INET 1909 if (ifa->ifa_addr->sa_family == AF_INET) 1910 arp_ifinit(&sc->arpcom, ifa); 1911 #endif 1912 break; 1913 case SIOCSIFFLAGS: 1914 if (ifp->if_flags & IFF_UP) { 1915 if (ifp->if_flags & IFF_RUNNING && 1916 (ifp->if_flags ^ sc->sc_if_flags) & 1917 IFF_PROMISC) { 1918 sis_setpromisc(sc); 1919 sis_setmulti(sc); 1920 } else if (ifp->if_flags & IFF_RUNNING && 1921 (ifp->if_flags ^ sc->sc_if_flags) & 1922 IFF_ALLMULTI) { 1923 sis_setmulti(sc); 1924 } else { 1925 if (!(ifp->if_flags & IFF_RUNNING)) 1926 sis_init(sc); 1927 } 1928 } else { 1929 if (ifp->if_flags & IFF_RUNNING) 1930 sis_stop(sc); 1931 } 1932 sc->sc_if_flags = ifp->if_flags; 1933 break; 1934 case SIOCSIFMTU: 1935 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) 1936 error = EINVAL; 1937 else if (ifp->if_mtu != ifr->ifr_mtu) 1938 ifp->if_mtu = ifr->ifr_mtu; 1939 break; 1940 case SIOCADDMULTI: 1941 case SIOCDELMULTI: 1942 error = (command == SIOCADDMULTI) ? 1943 ether_addmulti(ifr, &sc->arpcom) : 1944 ether_delmulti(ifr, &sc->arpcom); 1945 1946 if (error == ENETRESET) { 1947 /* 1948 * Multicast list has changed; set the hardware 1949 * filter accordingly. 1950 */ 1951 if (ifp->if_flags & IFF_RUNNING) 1952 sis_setmulti(sc); 1953 error = 0; 1954 } 1955 break; 1956 case SIOCGIFMEDIA: 1957 case SIOCSIFMEDIA: 1958 mii = &sc->sc_mii; 1959 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1960 break; 1961 default: 1962 error = ENOTTY; 1963 break; 1964 } 1965 1966 splx(s); 1967 1968 return(error); 1969 } 1970 1971 void 1972 sis_watchdog(struct ifnet *ifp) 1973 { 1974 struct sis_softc *sc; 1975 int s; 1976 1977 sc = ifp->if_softc; 1978 1979 if (sc->sis_stopped) 1980 return; 1981 1982 ifp->if_oerrors++; 1983 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1984 1985 s = splnet(); 1986 sis_stop(sc); 1987 sis_reset(sc); 1988 sis_init(sc); 1989 1990 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1991 sis_start(ifp); 1992 1993 splx(s); 1994 } 1995 1996 /* 1997 * Stop the adapter and free any mbufs allocated to the 1998 * RX and TX lists. 
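 * Note that the bzero() calls below deliberately stop short by
 * sizeof(bus_dmamap_t), i.e. the DMA map stored at the end of each
 * descriptor, so the maps created at attach time survive a
 * stop/init cycle.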
1999 */ 2000 void 2001 sis_stop(struct sis_softc *sc) 2002 { 2003 int i; 2004 struct ifnet *ifp; 2005 2006 if (sc->sis_stopped) 2007 return; 2008 2009 ifp = &sc->arpcom.ac_if; 2010 ifp->if_timer = 0; 2011 2012 timeout_del(&sc->sis_timeout); 2013 2014 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2015 sc->sis_stopped = 1; 2016 2017 CSR_WRITE_4(sc, SIS_IER, 0); 2018 CSR_WRITE_4(sc, SIS_IMR, 0); 2019 CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */ 2020 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); 2021 DELAY(1000); 2022 CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0); 2023 CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0); 2024 2025 sc->sis_link = 0; 2026 2027 /* 2028 * Free data in the RX lists. 2029 */ 2030 for (i = 0; i < SIS_RX_LIST_CNT_MAX; i++) { 2031 if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) { 2032 bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map; 2033 2034 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2035 BUS_DMASYNC_POSTREAD); 2036 bus_dmamap_unload(sc->sc_dmat, map); 2037 } 2038 if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) { 2039 m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf); 2040 sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL; 2041 } 2042 bzero((char *)&sc->sis_ldata->sis_rx_list[i], 2043 sizeof(struct sis_desc) - sizeof(bus_dmamap_t)); 2044 } 2045 2046 /* 2047 * Free the TX list buffers. 2048 */ 2049 for (i = 0; i < SIS_TX_LIST_CNT; i++) { 2050 if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) { 2051 bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map; 2052 2053 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2054 BUS_DMASYNC_POSTWRITE); 2055 bus_dmamap_unload(sc->sc_dmat, map); 2056 } 2057 if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) { 2058 m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf); 2059 sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL; 2060 } 2061 bzero((char *)&sc->sis_ldata->sis_tx_list[i], 2062 sizeof(struct sis_desc) - sizeof(bus_dmamap_t)); 2063 } 2064 } 2065 2066 /* 2067 * Stop all chip I/O so that the kernel's probe routines don't 2068 * get confused by errant DMAs when rebooting. 2069 */ 2070 void 2071 sis_shutdown(void *v) 2072 { 2073 struct sis_softc *sc = (struct sis_softc *)v; 2074 2075 sis_stop(sc); 2076 } 2077