/*	$OpenBSD: if_ste.c,v 1.47 2011/06/22 16:44:27 tedu Exp $ */
/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>		/* for vtophys */

#include <sys/device.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define STE_USEIOSPACE

#include <dev/pci/if_stereg.h>

int	ste_probe(struct device *, void *, void *);
void	ste_attach(struct device *, struct device *, void *);
int	ste_intr(void *);
void	ste_init(void *);
void	ste_rxeoc(struct ste_softc *);
void	ste_rxeof(struct ste_softc *);
void	ste_txeoc(struct ste_softc *);
void	ste_txeof(struct ste_softc *);
void	ste_stats_update(void *);
void	ste_stop(struct ste_softc *);
void	ste_reset(struct ste_softc *);
int	ste_ioctl(struct ifnet *, u_long, caddr_t);
int	ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *);
void	ste_start(struct ifnet *);
void	ste_watchdog(struct ifnet *);
int	ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *,
	    struct mbuf *);
int	ste_ifmedia_upd(struct ifnet *);
void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	ste_mii_sync(struct ste_softc *);
void	ste_mii_send(struct ste_softc *, u_int32_t, int);
int	ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
int	ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
int	ste_miibus_readreg(struct device *, int, int);
void	ste_miibus_writereg(struct device *, int, int, int);
void	ste_miibus_statchg(struct device *);

int	ste_eeprom_wait(struct ste_softc *);
int	ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int);
void	ste_wait(struct ste_softc *);
void	ste_setmulti(struct ste_softc *);
int	ste_init_rx_list(struct ste_softc *);
void	ste_init_tx_list(struct ste_softc *);

#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)

#define MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

struct cfattach ste_ca = {
	sizeof(struct ste_softc), ste_probe, ste_attach
};

struct cfdriver ste_cd = {
	NULL, "ste", DV_IFNET
};

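/*
 * Bit-banged MII access: the ST201 exposes its MII management pins
 * through the STE_PHYCTL register, so the routines below clock MII
 * frames out by hand using the MII_SET/MII_CLR helpers above.
 */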
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
ste_mii_sync(struct ste_softc *sc)
{
	int i;

	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void
ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int
ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	int ack, i, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	int s;

	s = splnet();

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	splx(s);

	return(0);
}

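/*
 * mii(4) bus glue: register reads and writes are funneled through the
 * bit-banged frame routines above.
 */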
int
ste_miibus_readreg(struct device *self, int phy, int reg)
{
	struct ste_softc *sc = (struct ste_softc *)self;
	struct ste_mii_frame frame;

	if (sc->ste_one_phy && phy != 0)
		return (0);

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
ste_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct ste_softc *sc = (struct ste_softc *)self;
	struct ste_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return;
}

void
ste_miibus_statchg(struct device *self)
{
	struct ste_softc *sc = (struct ste_softc *)self;
	struct mii_data *mii;
	int fdx, fcur;

	mii = &sc->sc_mii;

	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;

	if ((fcur && fdx) || (!fcur && !fdx))
		return;

	STE_SETBIT4(sc, STE_DMACTL,
	    STE_DMACTL_RXDMA_STALL | STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);

	if (fdx)
		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	else
		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);

	STE_SETBIT4(sc, STE_DMACTL,
	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);

	return;
}

int
ste_ifmedia_upd(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	sc->ste_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

void
ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

void
ste_wait(struct ste_softc *sc)
{
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);

	return;
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.
 */
int
ste_eeprom_wait(struct ste_softc *sc)
{
	int i;

	DELAY(1000);

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		printf("%s: eeprom failed to come ready\n",
		    sc->sc_dev.dv_xname);
		return(1);
	}

	return(0);
}

/*
 * Read a sequence of words from the EEPROM.  Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 */
int
ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;

	if (ste_eeprom_wait(sc))
		return(1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, STE_EEPROM_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}

void
ste_setmulti(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_2(sc, STE_MAR0, 0);
	CSR_WRITE_2(sc, STE_MAR1, 0);
	CSR_WRITE_2(sc, STE_MAR2, 0);
	CSR_WRITE_2(sc, STE_MAR3, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}

int
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
		return claimed;

	for (;;) {
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		claimed = 1;

		if (status & STE_ISR_RX_DMADONE) {
			ste_rxeoc(sc);
			ste_rxeof(sc);
		}

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			timeout_del(&sc->sc_stats_tmo);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(&sc->sc_mii);

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ste_start(ifp);

	return claimed;
}

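/*
 * The RX head pointer can fall behind the hardware if a completion was
 * missed; walk the ring for the next descriptor the chip has finished
 * and resynchronize the head with it.
 */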
void
ste_rxeoc(struct ste_softc *sc)
{
	struct ste_chain_onefrag *cur_rx;

	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
		cur_rx = sc->ste_cdata.ste_rx_head;
		do {
			cur_rx = cur_rx->ste_next;
			/* If the ring is empty, just return. */
			if (cur_rx == sc->ste_cdata.ste_rx_head)
				return;
		} while (cur_rx->ste_ptr->ste_status == 0);
		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
			/* We've fallen behind the chip: catch it. */
			sc->ste_cdata.ste_rx_head = cur_rx;
		}
	}
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
ste_rxeof(struct ste_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	int total_len = 0, count = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	while ((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
	    & STE_RXSTAT_DMADONE) {
		if ((STE_RX_LIST_CNT - count) < 3)
			break;

		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			printf("%s: bad receive status -- packet dropped\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster.  If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue.  This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);

		cur_rx->ste_ptr->ste_status = 0;
		count++;
	}

	return;
}

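/*
 * Drain the TX status register.  Transmit errors (underruns, excessive
 * collisions, reclaim errors) cause a reset and reinit; the TX start
 * threshold is raised after an underrun.
 */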
void
ste_txeoc(struct ste_softc *sc)
{
	u_int8_t txstat;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
	    STE_TXSTATUS_TXDONE) {
		if (txstat & STE_TXSTATUS_UNDERRUN ||
		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
		    txstat & STE_TXSTATUS_RECLAIMERR) {
			ifp->if_oerrors++;
			printf("%s: transmission error: %x\n",
			    sc->sc_dev.dv_xname, txstat);

			ste_reset(sc);
			ste_init(sc);

			if (txstat & STE_TXSTATUS_UNDERRUN &&
			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				printf("%s: tx underrun, increasing tx"
				    " start threshold to %d bytes\n",
				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
			}
			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
			    (ETHER_MAX_DIX_LEN >> 4));
		}
		ste_init(sc);
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}

	return;
}

void
ste_txeof(struct ste_softc *sc)
{
	struct ste_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->arpcom.ac_if;

	idx = sc->ste_cdata.ste_tx_cons;
	while (idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
			break;

		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	if (idx == sc->ste_cdata.ste_tx_prod)
		ifp->if_timer = 0;

	return;
}

void
ste_stats_update(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	int s;

	s = splnet();

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	mii = &sc->sc_mii;

	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
	    + CSR_READ_1(sc, STE_MULTI_COLLS)
	    + CSR_READ_1(sc, STE_SINGLE_COLLS);

	if (!sc->ste_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->ste_link++;
			/*
			 * We don't get a call-back on re-init, so do it
			 * here; otherwise we get stuck in the wrong link
			 * state.
			 */
			ste_miibus_statchg((struct device *)sc);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				ste_start(ifp);
		}
	}

	timeout_add_sec(&sc->sc_stats_tmo, 1);
	splx(s);

	return;
}

const struct pci_matchid ste_devices[] = {
	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_550TX }
};

/*
 * Probe for a Sundance ST201 chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
ste_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
	    nitems(ste_devices)));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
ste_attach(struct device *parent, struct device *self, void *aux)
{
	const char *intrstr = NULL;
	pcireg_t command;
	struct ste_softc *sc = (struct ste_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	struct ifnet *ifp;
	bus_size_t size;

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pc, pa->pa_tag, STE_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL);
		if (command & STE_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, STE_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode -- setting to D0\n",
			    sc->sc_dev.dv_xname, command & STE_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL,
			    command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, STE_PCI_INTLINE, irq);
		}
	}

	/*
	 * Only use one PHY since this chip reports multiple.
	 * Note: on the DFE-550 the PHY is at 1; on the DFE-580 it is
	 * at 0 and 1.  The affected chip is rev 0x12.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_550TX &&
	    PCI_REVISION(pa->pa_class) == 0x12)
		sc->ste_one_phy = 1;

	/*
	 * Map control/status registers.
	 */
#ifdef STE_USEIOSPACE
	if (pci_mapreg_map(pa, STE_PCI_LOIO,
	    PCI_MAPREG_TYPE_IO, 0,
	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    STE_EEADDR_NODE0, 3, 0)) {
		printf(": failed to read station address\n");
		goto fail_2;
	}

	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
	    M_DEVBUF, M_DONTWAIT);
	if (sc->ste_ldata_ptr == NULL) {
		printf(": no memory for list buffers!\n");
		goto fail_2;
	}

	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_miibus_readreg;
	sc->sc_mii.mii_writereg = ste_miibus_writereg;
	sc->sc_mii.mii_statchg = ste_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,
	    ste_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
}

int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
	c->ste_ptr->ste_frag.ste_len =
	    (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;

	return(0);
}

int
ste_init_rx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
		}
		ld->ste_rx_list[i].ste_status = 0;
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_phys =
		    vtophys((vaddr_t)&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
	}

	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;

	return;
}

void
ste_init(void *xsc)
{
	struct ste_softc *sc = (struct ste_softc *)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i, s;

	s = splnet();

	ste_stop(sc);

	mii = &sc->sc_mii;

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
		ste_stop(sc);
		splx(s);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/* Load the address of the RX list. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX first packet) */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	sc->ste_tx_prev = NULL;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	ste_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
	timeout_add_sec(&sc->sc_stats_tmo, 1);

	return;
}

void
ste_stop(struct ste_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	timeout_del(&sc->sc_stats_tmo);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine; otherwise, under heavy
	 * RX traffic, the chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_link = 0;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	return;
}

void
ste_reset(struct ste_softc *sc)
{
	int i;

	STE_SETBIT4(sc, STE_ASICCTL,
	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
	    STE_ASICCTL_EXTRESET_RESET);

	DELAY(100000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: global reset never completed\n",
		    sc->sc_dev.dv_xname);
}

int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			ste_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
		default:
			ste_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			}
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
				ste_setmulti(sc);
			if (!(ifp->if_flags & IFF_RUNNING)) {
				sc->ste_tx_thresh = STE_TXSTART_THRESH;
				ste_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ste_stop(sc);
		}
		sc->ste_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			ste_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

int
ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
{
	int frag = 0;
	struct ste_frag *f = NULL;
	struct mbuf *m;
	struct ste_desc *d;

	d = c->ste_ptr;
	d->ste_ctl = 0;

encap_retry:
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == STE_MAXFRAGS)
				break;
			f = &d->ste_frags[frag];
			f->ste_addr = vtophys(mtod(m, vaddr_t));
			f->ste_len = m->m_len;
			frag++;
		}
	}

	if (m != NULL) {
		struct mbuf *mn;

		/*
		 * We ran out of segments.  We have to recopy this
		 * mbuf chain first.  Bail out if we can't get the
		 * new buffers.
		 */
		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) {
			m_freem(m_head);
			return ENOMEM;
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				m_freem(m_head);
				return ENOMEM;
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(mn, caddr_t));
		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = mn;
		goto encap_retry;
	}

	c->ste_mbuf = m_head;
	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
	d->ste_ctl = 1;

	return(0);
}

void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct mbuf *m_head = NULL;
	struct ste_chain *cur_tx;
	int idx;

	sc = ifp->if_softc;

	if (!sc->ste_link)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->ste_cdata.ste_tx_prod;

	while (sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
		/*
		 * We cannot re-use the last (free) descriptor;
		 * the chip may not have read its ste_next yet.
		 */
		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
		    sc->ste_cdata.ste_tx_cons) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (ste_encap(sc, cur_tx, m_head) != 0)
			break;

		cur_tx->ste_ptr->ste_next = 0;

		if (sc->ste_tx_prev == NULL) {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Load address of the TX list */
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);

			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));

			/* Set TX polling interval to start TX engine */
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			sc->ste_tx_prev->ste_ptr->ste_next = cur_tx->ste_phys;
		}

		sc->ste_tx_prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		STE_INC(idx, STE_TX_LIST_CNT);
		ifp->if_timer = 5;
	}
	sc->ste_cdata.ste_tx_prod = idx;

	return;
}

void
ste_watchdog(struct ifnet *ifp)
{
	struct ste_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeoc(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		ste_start(ifp);

	return;
}