/*	$OpenBSD: if_ste.c,v 1.20 2003/06/29 17:20:03 avsm Exp $ */
/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */

#include <sys/device.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define STE_USEIOSPACE

#include <dev/pci/if_stereg.h>

int	ste_probe(struct device *, void *, void *);
void	ste_attach(struct device *, struct device *, void *);
int	ste_intr(void *);
void	ste_shutdown(void *);
void	ste_init(void *);
void	ste_rxeof(struct ste_softc *);
void	ste_txeoc(struct ste_softc *);
void	ste_txeof(struct ste_softc *);
void	ste_stats_update(void *);
void	ste_stop(struct ste_softc *);
void	ste_reset(struct ste_softc *);
int	ste_ioctl(struct ifnet *, u_long, caddr_t);
int	ste_encap(struct ste_softc *, struct ste_chain *,
	    struct mbuf *);
void	ste_start(struct ifnet *);
void	ste_watchdog(struct ifnet *);
int	ste_newbuf(struct ste_softc *,
	    struct ste_chain_onefrag *,
	    struct mbuf *);
int	ste_ifmedia_upd(struct ifnet *);
void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	ste_mii_sync(struct ste_softc *);
void	ste_mii_send(struct ste_softc *, u_int32_t, int);
int	ste_mii_readreg(struct ste_softc *,
	    struct ste_mii_frame *);
int	ste_mii_writereg(struct ste_softc *,
	    struct ste_mii_frame *);
int	ste_miibus_readreg(struct device *, int, int);
void	ste_miibus_writereg(struct device *, int, int, int);
void	ste_miibus_statchg(struct device *);

int	ste_eeprom_wait(struct ste_softc *);
int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
	    int, int);
void	ste_wait(struct ste_softc *);
u_int8_t	ste_calchash(caddr_t);
void	ste_setmulti(struct ste_softc *);
int	ste_init_rx_list(struct ste_softc *);
void	ste_init_tx_list(struct ste_softc *);

#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)


#define MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void ste_mii_sync(sc)
	struct ste_softc	*sc;
{
	register int	i;

	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void ste_mii_send(sc, bits, cnt)
	struct ste_softc	*sc;
	u_int32_t	bits;
	int	cnt;
{
	int	i;

	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int ste_mii_readreg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;

{
	int	i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:

	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int ste_mii_writereg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;

{
	int	s;

	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	splx(s);

	return(0);
}

int ste_miibus_readreg(self, phy, reg)
	struct device	*self;
	int	phy, reg;
{
	struct ste_softc	*sc = (struct ste_softc *)self;
	struct ste_mii_frame	frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void ste_miibus_writereg(self, phy, reg, data)
	struct device	*self;
	int	phy, reg, data;
{
	struct ste_softc	*sc = (struct ste_softc *)self;
	struct ste_mii_frame	frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return;
}

void ste_miibus_statchg(self)
	struct device	*self;
{
	struct ste_softc	*sc = (struct ste_softc *)self;
	struct mii_data	*mii;

	mii = &sc->sc_mii;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	} else {
		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	}

	return;
}

int ste_ifmedia_upd(ifp)
	struct ifnet	*ifp;
{
	struct ste_softc	*sc;
	struct mii_data	*mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	sc->ste_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

void ste_ifmedia_sts(ifp, ifmr)
	struct ifnet	*ifp;
	struct ifmediareq	*ifmr;
{
	struct ste_softc	*sc;
	struct mii_data	*mii;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

void ste_wait(sc)
	struct ste_softc	*sc;
{
	register int	i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);

	return;
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.
 */
int ste_eeprom_wait(sc)
	struct ste_softc	*sc;
{
	int	i;

	DELAY(1000);

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
		return(1);
	}

	return(0);
}

/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 */
int ste_read_eeprom(sc, dest, off, cnt, swap)
	struct ste_softc	*sc;
	caddr_t	dest;
	int	off;
	int	cnt;
	int	swap;
{
	int	err = 0, i;
	u_int16_t	word = 0, *ptr;

	if (ste_eeprom_wait(sc))
		return(1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, STE_EEPROM_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}

u_int8_t ste_calchash(addr)
	caddr_t	addr;
{

	u_int32_t	crc, carry;
	int	i, j;
	u_int8_t	c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return(crc & 0x0000003F);
}

void ste_setmulti(sc)
	struct ste_softc	*sc;
{
	struct ifnet	*ifp;
	struct arpcom	*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	int	h = 0;
	u_int32_t	hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, STE_MAR0, 0);
	CSR_WRITE_4(sc, STE_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = ste_calchash(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, STE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, STE_MAR1, hashes[1]);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}

int ste_intr(xsc)
	void	*xsc;
{
	struct ste_softc	*sc;
	struct ifnet	*ifp;
	u_int16_t	status;
	int	claimed = 0;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
		return claimed;

	for (;;) {
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		claimed = 1;

		if (status & STE_ISR_RX_DMADONE)
			ste_rxeof(sc);

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			timeout_del(&sc->sc_stats_tmo);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ste_start(ifp);

	return claimed;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void ste_rxeof(sc)
	struct ste_softc	*sc;
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	struct ste_chain_onefrag	*cur_rx;
	int	total_len = 0;
	u_int32_t	rxstat;

	ifp = &sc->arpcom.ac_if;

again:

	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)) {
		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			printf("%s: bad receive status -- packet dropped\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, STE_RX_DMALIST_PTR) == 0 ||
	    CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_RXDMA_STOPPED) {
		STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
		ste_wait(sc);
		CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
		    vtophys(&sc->ste_ldata->ste_rx_list[0]));
		sc->ste_cdata.ste_rx_head = &sc->ste_cdata.ste_rx_chain[0];
		STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
		goto again;
	}

	return;
}

void ste_txeoc(sc)
	struct ste_softc	*sc;
{
	u_int8_t	txstat;
	struct ifnet	*ifp;

	ifp = &sc->arpcom.ac_if;

	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
	    STE_TXSTATUS_TXDONE) {
		if (txstat & STE_TXSTATUS_UNDERRUN ||
		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
		    txstat & STE_TXSTATUS_RECLAIMERR) {
			ifp->if_oerrors++;
			printf("%s: transmission error: %x\n",
			    sc->sc_dev.dv_xname, txstat);

			ste_reset(sc);
			ste_init(sc);

			if (txstat & STE_TXSTATUS_UNDERRUN &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				printf("%s: tx underrun, increasing tx"
				    " start threshold to %d bytes\n",
				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
			}
			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
			    (STE_PACKET_SIZE >> 4));
		}
		ste_init(sc);
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}

	return;
}

void ste_txeof(sc)
	struct ste_softc	*sc;
{
	struct ste_chain	*cur_tx = NULL;
	struct ifnet	*ifp;
	int	idx;

	ifp = &sc->arpcom.ac_if;

	idx = sc->ste_cdata.ste_tx_cons;
	while(idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
			break;

		if (cur_tx->ste_mbuf != NULL) {
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}

		ifp->if_opackets++;

		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->ste_cdata.ste_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

void ste_stats_update(xsc)
	void	*xsc;
{
	struct ste_softc	*sc;
	struct ste_stats	stats;
	struct ifnet	*ifp;
	struct mii_data	*mii;
	int	i, s;
	u_int8_t	*p;

	s = splimp();

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	mii = &sc->sc_mii;

	p = (u_int8_t *)&stats;

	for (i = 0; i < sizeof(stats); i++) {
		*p = CSR_READ_1(sc, STE_STATS + i);
		p++;
	}

	ifp->if_collisions += stats.ste_single_colls +
	    stats.ste_multi_colls + stats.ste_late_colls;

	mii_tick(mii);
	if (!sc->ste_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->ste_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
			ste_start(ifp);
	}

	timeout_add(&sc->sc_stats_tmo, hz);
	splx(s);

	return;
}

const struct pci_matchid ste_devices[] = {
	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201 },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_550TX },
};

/*
 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int ste_probe(parent, match, aux)
	struct device	*parent;
	void	*match, *aux;
{
	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
	    sizeof(ste_devices)/sizeof(ste_devices[0])));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void ste_attach(parent, self, aux)
	struct device	*parent, *self;
	void	*aux;
{
	int	s;
	const char	*intrstr = NULL;
	u_int32_t	command;
	struct ste_softc	*sc = (struct ste_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	struct ifnet	*ifp;
	bus_addr_t	iobase;
	bus_size_t	iosize;

	s = splimp();

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pc, pa->pa_tag, STE_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL);
		if (command & STE_PSTATE_MASK) {
			u_int32_t	iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, STE_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode -- setting to D0\n",
			    sc->sc_dev.dv_xname, command & STE_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, STE_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

#ifdef STE_USEIOSPACE
	if (!(command & PCI_COMMAND_IO_ENABLE)) {
		printf(": failed to enable I/O ports\n");
		goto fail;
	}
	if (pci_io_find(pc, pa->pa_tag, STE_PCI_LOIO, &iobase, &iosize)) {
		printf(": can't find I/O space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->ste_bhandle)) {
		printf(": can't map I/O space\n");
		goto fail;
	}
	sc->ste_btag = pa->pa_iot;
#else
	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		goto fail;
	}
	if (pci_mem_find(pc, pa->pa_tag, STE_PCI_LOMEM, &iobase, &iosize,NULL)){
		printf(": can't find mem space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->ste_bhandle)) {
		printf(": can't map mem space\n");
		goto fail;
	}
	sc->ste_btag = pa->pa_memt;
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	ste_read_eeprom(sc,(caddr_t)&sc->arpcom.ac_enaddr,STE_EEADDR_NODE0,3,0);

	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->ste_ldata_ptr == NULL) {
		printf("%s: no memory for list buffers!\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_miibus_readreg;
	sc->sc_mii.mii_writereg = ste_miibus_writereg;
	sc->sc_mii.mii_statchg = ste_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(ste_shutdown, sc);

fail:
	splx(s);
	return;
}

int ste_newbuf(sc, c, m)
	struct ste_softc	*sc;
	struct ste_chain_onefrag	*c;
	struct mbuf	*m;
{
	struct mbuf	*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list -- "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return(ENOBUFS);
		}
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("%s: no memory for rx list -- "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	c->ste_ptr->ste_frag.ste_len = 1536 | STE_FRAG_LAST;

	return(0);
}

int ste_init_rx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int	i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}

	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

void ste_init_tx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int	i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
		if (i == 0)
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[STE_TX_LIST_CNT - 1];
		else
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[i - 1];
	}

	bzero((char *)ld->ste_tx_list,
	    sizeof(struct ste_desc) * STE_TX_LIST_CNT);

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
	cd->ste_tx_cnt = 0;

	return;
}

void ste_init(xsc)
	void	*xsc;
{
	struct ste_softc	*sc = (struct ste_softc *)xsc;
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct mii_data	*mii;
	int	i, s;

	s = splimp();

	ste_stop(sc);

	mii = &sc->sc_mii;

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
		ste_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/* Load the address of the RX list. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_tx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	ste_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
	timeout_add(&sc->sc_stats_tmo, hz);

	return;
}

void ste_stop(sc)
	struct ste_softc	*sc;
{
	int	i;
	struct ifnet	*ifp;

	ifp = &sc->arpcom.ac_if;

	timeout_del(&sc->sc_stats_tmo);

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);

	sc->ste_link = 0;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	return;
}

void ste_reset(sc)
	struct ste_softc	*sc;
{
	int	i;

	STE_SETBIT4(sc, STE_ASICCTL,
	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
	    STE_ASICCTL_EXTRESET_RESET);

	DELAY(100000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("%s: global reset never completed\n", sc->sc_dev.dv_xname);

	return;
}

int ste_ioctl(ifp, command, data)
	struct ifnet	*ifp;
	u_long	command;
	caddr_t	data;
{
	struct ste_softc	*sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	struct ifaddr	*ifa = (struct ifaddr *)data;
	struct mii_data	*mii;
	int	s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			ste_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
		default:
			ste_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (!(ifp->if_flags & IFF_RUNNING)) {
				sc->ste_tx_thresh = STE_MIN_FRAMELEN;
				ste_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ste_stop(sc);
		}
		sc->ste_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			ste_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}

int ste_encap(sc, c, m_head)
	struct ste_softc	*sc;
	struct ste_chain	*c;
	struct mbuf	*m_head;
{
	int	frag = 0;
	struct ste_frag	*f = NULL;
	struct mbuf	*m;
	struct ste_desc	*d;
	int	total_len = 0;

	d = c->ste_ptr;
	d->ste_ctl = 0;
	d->ste_next = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == STE_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->ste_ptr->ste_frags[frag];
			f->ste_addr = vtophys(mtod(m, vaddr_t));
			f->ste_len = m->m_len;
			frag++;
		}
	}

	c->ste_mbuf = m_head;
	c->ste_ptr->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
	c->ste_ptr->ste_ctl = total_len;

	return(0);
}

void ste_start(ifp)
	struct ifnet	*ifp;
{
	struct ste_softc	*sc;
	struct mbuf	*m_head = NULL;
	struct ste_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
	int	idx;

	sc = ifp->if_softc;

	if (!sc->ste_link)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->ste_cdata.ste_tx_prod;
	start_tx = &sc->ste_cdata.ste_tx_chain[idx];

	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
		if ((STE_TX_LIST_CNT - sc->ste_cdata.ste_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		ste_encap(sc, cur_tx, m_head);

		if (prev != NULL)
			prev->ste_ptr->ste_next = cur_tx->ste_phys;
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf);
#endif

		STE_INC(idx, STE_TX_LIST_CNT);
		sc->ste_cdata.ste_tx_cnt++;
	}

	if (cur_tx == NULL)
		return;

	cur_tx->ste_ptr->ste_ctl |= STE_TXCTL_DMAINTR;

	/* Start transmission */
	sc->ste_cdata.ste_tx_prod = idx;
	start_tx->ste_prev->ste_ptr->ste_next = start_tx->ste_phys;

	ifp->if_timer = 5;

	return;
}

void ste_watchdog(ifp)
	struct ifnet	*ifp;
{
	struct ste_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init(sc);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ste_start(ifp);

	return;
}

void ste_shutdown(v)
	void	*v;
{
	struct ste_softc	*sc = (struct ste_softc *)v;

	ste_stop(sc);
}

struct cfattach ste_ca = {
	sizeof(struct ste_softc), ste_probe, ste_attach
};

struct cfdriver ste_cd = {
	0, "ste", DV_IFNET
};