/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_sf.c,v 1.18.2.8 2001/12/16 15:46:07 luigi Exp $
 * $DragonFly: src/sys/dev/netif/sf/if_sf.c,v 1.5 2003/11/20 22:07:30 dillon Exp $
 */

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * ftp.adaptec.com:/pub/BBS/userguides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */

/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and reduced CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types. Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements. This driver makes use of type0 transmit frame
 * descriptors (since BSD fragments packets across an mbuf chain)
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else). The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries. The receive buffer alignment
 * causes problems on the Alpha platform, where the packet payload
 * should be longword aligned. There is no simple way around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
 *
 * The Starfire has no internal transceiver, relying instead on an
 * external MII-based transceiver. Accessing registers on external
 * PHYs is done through a special register map rather than with the
 * usual bitbang MDIO method.
 *
 * Accessing the registers on the Starfire is a little tricky. The
 * Starfire has a 512K internal register space. When programmed for
 * PCI memory mapped mode, the entire register space can be accessed
 * directly. However in I/O space mode, only 256 bytes are directly
 * mapped into PCI I/O space. The other registers can be accessed
 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
 * registers inside the 256-byte I/O window.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define SF_USEIOSPACE

#include "if_sfreg.h"

static struct sf_type sf_devs[] = {
        { AD_VENDORID, AD_DEVICEID_STARFIRE,
                "Adaptec AIC-6915 10/100BaseTX" },
        { 0, 0, NULL }
};

static int sf_probe		(device_t);
static int sf_attach		(device_t);
static int sf_detach		(device_t);
static void sf_intr		(void *);
static void sf_stats_update	(void *);
static void sf_rxeof		(struct sf_softc *);
static void sf_txeof		(struct sf_softc *);
static int sf_encap		(struct sf_softc *,
					struct sf_tx_bufdesc_type0 *,
					struct mbuf *);
static void sf_start		(struct ifnet *);
static int sf_ioctl		(struct ifnet *, u_long, caddr_t);
static void sf_init		(void *);
static void sf_stop		(struct sf_softc *);
static void sf_watchdog		(struct ifnet *);
static void sf_shutdown		(device_t);
static int sf_ifmedia_upd	(struct ifnet *);
static void sf_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
static void sf_reset		(struct sf_softc *);
static int sf_init_rx_ring	(struct sf_softc *);
static void sf_init_tx_ring	(struct sf_softc *);
static int sf_newbuf		(struct sf_softc *,
					struct sf_rx_bufdesc_type0 *,
					struct mbuf *);
static void sf_setmulti		(struct sf_softc *);
static int sf_setperf		(struct sf_softc *, int, caddr_t);
static int sf_sethash		(struct sf_softc *, caddr_t, int);
#ifdef notdef
static int sf_setvlan		(struct sf_softc *, int, u_int32_t);
#endif

static u_int8_t sf_read_eeprom	(struct sf_softc *, int);
static u_int32_t sf_calchash	(caddr_t);

static int sf_miibus_readreg	(device_t, int, int);
static int sf_miibus_writereg	(device_t, int, int, int);
static void sf_miibus_statchg	(device_t);

static u_int32_t csr_read_4	(struct sf_softc *, int);
static void csr_write_4		(struct sf_softc *, int, u_int32_t);
static void sf_txthresh_adjust	(struct sf_softc *);

#ifdef SF_USEIOSPACE
#define SF_RES			SYS_RES_IOPORT
#define SF_RID			SF_PCI_LOIO
#else
#define SF_RES			SYS_RES_MEMORY
#define SF_RID			SF_PCI_LOMEM
#endif

static device_method_t sf_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,		sf_probe),
        DEVMETHOD(device_attach,	sf_attach),
        DEVMETHOD(device_detach,	sf_detach),
        DEVMETHOD(device_shutdown,	sf_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,	bus_generic_print_child),
        DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
        DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
        DEVMETHOD(miibus_statchg,	sf_miibus_statchg),

        { 0, 0 }
};

static driver_t sf_driver = {
        "sf",
        sf_methods,
        sizeof(struct sf_softc),
};

static devclass_t sf_devclass;

DECLARE_DUMMY_MODULE(if_sf);
DRIVER_MODULE(if_sf, pci, sf_driver, sf_devclass, 0, 0);
DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);

#define SF_SETBIT(sc, reg, x)	\
        csr_write_4(sc, reg, csr_read_4(sc, reg) | x)

#define SF_CLRBIT(sc, reg, x)	\
        csr_write_4(sc, reg, csr_read_4(sc, reg) & ~x)

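/*
 * Register access helpers. In I/O space mode only 256 bytes of the
 * register map are visible, so registers outside that window are
 * reached indirectly: the target offset is written to
 * SF_INDIRECTIO_ADDR and the data is then transferred through
 * SF_INDIRECTIO_DATA. Because this is a two-step sequence, a caller
 * must not be interrupted between the address write and the data
 * access (see the comment above sf_stats_update() below).
 */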
static u_int32_t csr_read_4(sc, reg)
        struct sf_softc *sc;
        int reg;
{
        u_int32_t val;

#ifdef SF_USEIOSPACE
        CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
        val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
#else
        val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
#endif

        return(val);
}

static u_int8_t sf_read_eeprom(sc, reg)
        struct sf_softc *sc;
        int reg;
{
        u_int8_t val;

        val = (csr_read_4(sc, SF_EEADDR_BASE +
            (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;

        return(val);
}

static void csr_write_4(sc, reg, val)
        struct sf_softc *sc;
        int reg;
        u_int32_t val;
{
#ifdef SF_USEIOSPACE
        CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
        CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
#else
        CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
#endif
        return;
}

static u_int32_t sf_calchash(addr)
        caddr_t addr;
{
        u_int32_t crc, carry;
        int i, j;
        u_int8_t c;

        /* Compute CRC for the address value. */
        crc = 0xFFFFFFFF; /* initial value */

        for (i = 0; i < 6; i++) {
                c = *(addr + i);
                for (j = 0; j < 8; j++) {
                        carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
                        crc <<= 1;
                        c >>= 1;
                        if (carry)
                                crc = (crc ^ 0x04c11db6) | carry;
                }
        }

        /* return the filter bit position */
        return(crc >> 23 & 0x1FF);
}

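/*
 * sf_calchash() returns a 9-bit value: sf_sethash() below uses the
 * upper five bits to select which word of the 512-bit hash table to
 * touch (stepping by SF_RXFILT_HASH_SKIP) and the low four bits to
 * select the bit within that 16-bit word.
 */
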
/*
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx.' The perfect filter only has 16 entries so do
 * some sanity tests.
 */
static int sf_setperf(sc, idx, mac)
        struct sf_softc *sc;
        int idx;
        caddr_t mac;
{
        u_int16_t *p;

        if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
                return(EINVAL);

        if (mac == NULL)
                return(EINVAL);

        p = (u_int16_t *)mac;

        csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
            (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2]));
        csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
            (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1]));
        csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
            (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0]));

        return(0);
}

/*
 * Set the bit in the 512-bit hash table that corresponds to the
 * specified mac address 'mac.' If 'prio' is nonzero, update the
 * priority hash table instead of the filter hash table.
 */
static int sf_sethash(sc, mac, prio)
        struct sf_softc *sc;
        caddr_t mac;
        int prio;
{
        u_int32_t h = 0;

        if (mac == NULL)
                return(EINVAL);

        h = sf_calchash(mac);

        if (prio) {
                SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
                    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
        } else {
                SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
                    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
        }

        return(0);
}

#ifdef notdef
/*
 * Set a VLAN tag in the receive filter.
 */
static int sf_setvlan(sc, idx, vlan)
        struct sf_softc *sc;
        int idx;
        u_int32_t vlan;
{
        if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
                return(EINVAL);

        csr_write_4(sc, SF_RXFILT_HASH_BASE +
            (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);

        return(0);
}
#endif

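/*
 * MII access. The Starfire has no bitbang MDIO interface; instead,
 * each PHY register appears at a fixed offset in the Starfire's own
 * register space (computed here by SF_PHY_REG()), so reads and writes
 * go through the normal CSR access routines and we simply poll the
 * data-valid/busy bits.
 */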
static int sf_miibus_readreg(dev, phy, reg)
        device_t dev;
        int phy, reg;
{
        struct sf_softc *sc;
        int i;
        u_int32_t val = 0;

        sc = device_get_softc(dev);

        for (i = 0; i < SF_TIMEOUT; i++) {
                val = csr_read_4(sc, SF_PHY_REG(phy, reg));
                if (val & SF_MII_DATAVALID)
                        break;
        }

        if (i == SF_TIMEOUT)
                return(0);

        if ((val & 0x0000FFFF) == 0xFFFF)
                return(0);

        return(val & 0x0000FFFF);
}

static int sf_miibus_writereg(dev, phy, reg, val)
        device_t dev;
        int phy, reg, val;
{
        struct sf_softc *sc;
        int i;
        int busy;

        sc = device_get_softc(dev);

        csr_write_4(sc, SF_PHY_REG(phy, reg), val);

        for (i = 0; i < SF_TIMEOUT; i++) {
                busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
                if (!(busy & SF_MII_BUSY))
                        break;
        }

        return(0);
}

static void sf_miibus_statchg(dev)
        device_t dev;
{
        struct sf_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->sf_miibus);

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
                csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
        } else {
                SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
                csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
        }

        return;
}

static void sf_setmulti(sc)
        struct sf_softc *sc;
{
        struct ifnet *ifp;
        int i;
        struct ifmultiaddr *ifma;
        u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 };

        ifp = &sc->arpcom.ac_if;

        /* First zot all the existing filters. */
        for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
                sf_setperf(sc, i, (char *)&dummy);
        for (i = SF_RXFILT_HASH_BASE;
            i < (SF_RXFILT_HASH_MAX + 1); i += 4)
                csr_write_4(sc, i, 0);
        SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);

        /* Now program new ones. */
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);
        } else {
                i = 1;
                /* First find the tail of the list. */
                for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
                    ifma = ifma->ifma_link.le_next) {
                        if (ifma->ifma_link.le_next == NULL)
                                break;
                }
                /* Now traverse the list backwards. */
                for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
                    ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        /*
                         * Program the first 15 multicast groups
                         * into the perfect filter. For all others,
                         * use the hash table.
                         */
                        if (i < SF_RXFILT_PERFECT_CNT) {
                                sf_setperf(sc, i,
                                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                                i++;
                                continue;
                        }

                        sf_sethash(sc,
                            LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
                }
        }

        return;
}

/*
 * Set media options.
 */
static int sf_ifmedia_upd(ifp)
        struct ifnet *ifp;
{
        struct sf_softc *sc;
        struct mii_data *mii;

        sc = ifp->if_softc;
        mii = device_get_softc(sc->sf_miibus);
        sc->sf_link = 0;
        if (mii->mii_instance) {
                struct mii_softc *miisc;
                for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
                    miisc = LIST_NEXT(miisc, mii_list))
                        mii_phy_reset(miisc);
        }
        mii_mediachg(mii);

        return(0);
}

/*
 * Report current media status.
 */
static void sf_ifmedia_sts(ifp, ifmr)
        struct ifnet *ifp;
        struct ifmediareq *ifmr;
{
        struct sf_softc *sc;
        struct mii_data *mii;

        sc = ifp->if_softc;
        mii = device_get_softc(sc->sf_miibus);

        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;

        return;
}

static int sf_ioctl(ifp, command, data)
        struct ifnet *ifp;
        u_long command;
        caddr_t data;
{
        struct sf_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        struct mii_data *mii;
        int s, error = 0;

        s = splimp();

        switch(command) {
        case SIOCSIFADDR:
        case SIOCGIFADDR:
        case SIOCSIFMTU:
                error = ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING &&
                            ifp->if_flags & IFF_PROMISC &&
                            !(sc->sf_if_flags & IFF_PROMISC)) {
                                SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
                        } else if (ifp->if_flags & IFF_RUNNING &&
                            !(ifp->if_flags & IFF_PROMISC) &&
                            sc->sf_if_flags & IFF_PROMISC) {
                                SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
                        } else if (!(ifp->if_flags & IFF_RUNNING))
                                sf_init(sc);
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                sf_stop(sc);
                }
                sc->sf_if_flags = ifp->if_flags;
                error = 0;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                sf_setmulti(sc);
                error = 0;
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc->sf_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;
        default:
                error = EINVAL;
                break;
        }

        (void)splx(s);

        return(error);
}

static void sf_reset(sc)
        struct sf_softc *sc;
{
        register int i;

        csr_write_4(sc, SF_GEN_ETH_CTL, 0);
        SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
        DELAY(1000);
        SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);

        SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);

        for (i = 0; i < SF_TIMEOUT; i++) {
                DELAY(10);
                if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
                        break;
        }

        if (i == SF_TIMEOUT)
                printf("sf%d: reset never completed!\n", sc->sf_unit);

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);
        return;
}

/*
 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We also check the subsystem ID so that we can identify exactly which
 * NIC has been found, if possible.
 */
static int sf_probe(dev)
        device_t dev;
{
        struct sf_type *t;

        t = sf_devs;

        while(t->sf_name != NULL) {
                if ((pci_get_vendor(dev) == t->sf_vid) &&
                    (pci_get_device(dev) == t->sf_did)) {
                        switch((pci_read_config(dev,
                            SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) {
                        case AD_SUBSYSID_62011_REV0:
                        case AD_SUBSYSID_62011_REV1:
                                device_set_desc(dev,
                                    "Adaptec ANA-62011 10/100BaseTX");
                                return(0);
                                break;
                        case AD_SUBSYSID_62022:
                                device_set_desc(dev,
                                    "Adaptec ANA-62022 10/100BaseTX");
                                return(0);
                                break;
                        case AD_SUBSYSID_62044_REV0:
                        case AD_SUBSYSID_62044_REV1:
                                device_set_desc(dev,
                                    "Adaptec ANA-62044 10/100BaseTX");
                                return(0);
                                break;
                        case AD_SUBSYSID_62020:
                                device_set_desc(dev,
                                    "Adaptec ANA-62020 10/100BaseFX");
                                return(0);
                                break;
                        case AD_SUBSYSID_69011:
                                device_set_desc(dev,
                                    "Adaptec ANA-69011 10/100BaseTX");
                                return(0);
                                break;
                        default:
                                device_set_desc(dev, t->sf_name);
                                return(0);
                                break;
                        }
                }
                t++;
        }

        return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int sf_attach(dev)
        device_t dev;
{
        int s, i;
        u_int32_t command;
        struct sf_softc *sc;
        struct ifnet *ifp;
        int unit, rid, error = 0;

        s = splimp();

        sc = device_get_softc(dev);
        unit = device_get_unit(dev);
        bzero(sc, sizeof(struct sf_softc));

        /*
         * Handle power management nonsense.
         */
        command = pci_read_config(dev, SF_PCI_CAPID, 4) & 0x000000FF;
        if (command == 0x01) {

                command = pci_read_config(dev, SF_PCI_PWRMGMTCTRL, 4);
                if (command & SF_PSTATE_MASK) {
                        u_int32_t iobase, membase, irq;

                        /* Save important PCI config data. */
                        iobase = pci_read_config(dev, SF_PCI_LOIO, 4);
                        membase = pci_read_config(dev, SF_PCI_LOMEM, 4);
                        irq = pci_read_config(dev, SF_PCI_INTLINE, 4);

                        /* Reset the power state. */
                        printf("sf%d: chip is in D%d power mode "
                            "-- setting to D0\n", unit, command & SF_PSTATE_MASK);
                        command &= 0xFFFFFFFC;
                        pci_write_config(dev, SF_PCI_PWRMGMTCTRL, command, 4);

                        /* Restore PCI config data. */
                        pci_write_config(dev, SF_PCI_LOIO, iobase, 4);
                        pci_write_config(dev, SF_PCI_LOMEM, membase, 4);
                        pci_write_config(dev, SF_PCI_INTLINE, irq, 4);
                }
        }

        /*
         * Map control/status registers.
         */
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
        pci_write_config(dev, PCIR_COMMAND, command, 4);
        command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef SF_USEIOSPACE
        if (!(command & PCIM_CMD_PORTEN)) {
                printf("sf%d: failed to enable I/O ports!\n", unit);
                error = ENXIO;
                goto fail;
        }
#else
        if (!(command & PCIM_CMD_MEMEN)) {
                printf("sf%d: failed to enable memory mapping!\n", unit);
                error = ENXIO;
                goto fail;
        }
#endif

        rid = SF_RID;
        sc->sf_res = bus_alloc_resource(dev, SF_RES, &rid,
            0, ~0, 1, RF_ACTIVE);

        if (sc->sf_res == NULL) {
                printf("sf%d: couldn't map ports\n", unit);
                error = ENXIO;
                goto fail;
        }

        sc->sf_btag = rman_get_bustag(sc->sf_res);
        sc->sf_bhandle = rman_get_bushandle(sc->sf_res);

        /* Allocate interrupt */
        rid = 0;
        sc->sf_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->sf_irq == NULL) {
                printf("sf%d: couldn't map interrupt\n", unit);
                bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
                error = ENXIO;
                goto fail;
        }

        error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET,
            sf_intr, sc, &sc->sf_intrhand);

        if (error) {
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
                bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
                printf("sf%d: couldn't set up irq\n", unit);
                goto fail;
        }

        callout_handle_init(&sc->sf_stat_ch);

        /* Reset the adapter. */
        sf_reset(sc);

        /*
         * Get station address from the EEPROM.
         */
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                sc->arpcom.ac_enaddr[i] =
                    sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);

        /*
         * An Adaptec chip was detected. Inform the world.
         */
        printf("sf%d: Ethernet address: %6D\n", unit,
            sc->arpcom.ac_enaddr, ":");

        sc->sf_unit = unit;

        /* Allocate the descriptor queues. */
        sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF,
            M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

        if (sc->sf_ldata == NULL) {
                printf("sf%d: no memory for list buffers!\n", unit);
                bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
                bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
                error = ENXIO;
                goto fail;
        }

        bzero(sc->sf_ldata, sizeof(struct sf_list_data));

        /* Do MII setup. */
        if (mii_phy_probe(dev, &sc->sf_miibus,
            sf_ifmedia_upd, sf_ifmedia_sts)) {
                printf("sf%d: MII without any phy!\n", sc->sf_unit);
                contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF);
                bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
                bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
                error = ENXIO;
                goto fail;
        }

        ifp = &sc->arpcom.ac_if;
        ifp->if_softc = sc;
        ifp->if_unit = unit;
        ifp->if_name = "sf";
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = sf_ioctl;
        ifp->if_output = ether_output;
        ifp->if_start = sf_start;
        ifp->if_watchdog = sf_watchdog;
        ifp->if_init = sf_init;
        ifp->if_baudrate = 10000000;
        ifp->if_snd.ifq_maxlen = SF_TX_DLIST_CNT - 1;

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

fail:
        splx(s);
        return(error);
}

static int sf_detach(dev)
        device_t dev;
{
        struct sf_softc *sc;
        struct ifnet *ifp;
        int s;

        s = splimp();

        sc = device_get_softc(dev);
        ifp = &sc->arpcom.ac_if;

        ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
        sf_stop(sc);

        bus_generic_detach(dev);
        device_delete_child(dev, sc->sf_miibus);

        bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
        bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);

        contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF);

        splx(s);

        return(0);
}

static int sf_init_rx_ring(sc)
        struct sf_softc *sc;
{
        struct sf_list_data *ld;
        int i;

        ld = sc->sf_ldata;

        bzero((char *)ld->sf_rx_dlist_big,
            sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT);
        bzero((char *)ld->sf_rx_clist,
            sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT);

        for (i = 0; i < SF_RX_DLIST_CNT; i++) {
                if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        return(0);
}

static void sf_init_tx_ring(sc)
        struct sf_softc *sc;
{
        struct sf_list_data *ld;
        int i;

        ld = sc->sf_ldata;

        bzero((char *)ld->sf_tx_dlist,
            sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT);
        bzero((char *)ld->sf_tx_clist,
            sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT);

        for (i = 0; i < SF_TX_DLIST_CNT; i++)
                ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID;
        for (i = 0; i < SF_TX_CLIST_CNT; i++)
                ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX;

        ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1;
        sc->sf_tx_cnt = 0;

        return;
}

static int sf_newbuf(sc, c, m)
        struct sf_softc *sc;
        struct sf_rx_bufdesc_type0 *c;
        struct mbuf *m;
{
        struct mbuf *m_new = NULL;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);

                MCLGET(m_new, M_DONTWAIT);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return(ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        m_adj(m_new, sizeof(u_int64_t));

        c->sf_mbuf = m_new;
        c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t)));
        c->sf_valid = 1;

        return(0);
}

/*
 * The starfire is programmed to use 'normal' mode for packet reception,
 * which means we use the consumer/producer model for both the buffer
 * descriptor queue and the completion descriptor queue. The only problem
 * with this is that it involves a lot of register accesses: we have to
 * read the RX completion consumer and producer indexes and the RX buffer
 * producer index, plus the RX completion consumer and RX buffer producer
 * indexes have to be updated. It would have been easier if Adaptec had
 * put each index in a separate register, especially given that the damn
 * NIC has a 512K register space.
 *
 * In spite of all the lovely features that Adaptec crammed into the 6915,
 * it is marred by one truly stupid design flaw, which is that receive
 * buffer addresses must be aligned on a longword boundary. This forces
 * the packet payload to be unaligned, which is suboptimal on the x86 and
 * completely unusable on the Alpha. Our only recourse is to copy received
 * packets into properly aligned buffers before handing them off.
 */

static void sf_rxeof(sc)
        struct sf_softc *sc;
{
        struct ether_header *eh;
        struct mbuf *m;
        struct ifnet *ifp;
        struct sf_rx_bufdesc_type0 *desc;
        struct sf_rx_cmpdesc_type3 *cur_rx;
        u_int32_t rxcons, rxprod;
        int cmpprodidx, cmpconsidx, bufprodidx;

        ifp = &sc->arpcom.ac_if;

        rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
        rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
        cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
        cmpconsidx = SF_IDX_LO(rxcons);
        bufprodidx = SF_IDX_LO(rxprod);

        while (cmpconsidx != cmpprodidx) {
                struct mbuf *m0;

                cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
                desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
                m = desc->sf_mbuf;
                SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
                SF_INC(bufprodidx, SF_RX_DLIST_CNT);

                if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
                        ifp->if_ierrors++;
                        sf_newbuf(sc, desc, m);
                        continue;
                }

                m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
                    cur_rx->sf_len + ETHER_ALIGN, 0, ifp, NULL);
                sf_newbuf(sc, desc, m);
                if (m0 == NULL) {
                        ifp->if_ierrors++;
                        continue;
                }
                m_adj(m0, ETHER_ALIGN);
                m = m0;

                eh = mtod(m, struct ether_header *);
                ifp->if_ipackets++;

                /* Remove header from mbuf and pass it on. */
                m_adj(m, sizeof(struct ether_header));
                ether_input(ifp, eh, m);
        }

        csr_write_4(sc, SF_CQ_CONSIDX,
            (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
        csr_write_4(sc, SF_RXDQ_PTR_Q1,
            (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);

        return;
}

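/*
 * Note that the completion queue consumer index register carries both
 * the RX index (in the low half, extracted with SF_IDX_LO above) and
 * the TX index (in the high half, extracted with SF_IDX_HI in
 * sf_txeof() below), which is why each path masks off only its own
 * field before writing the register back.
 */
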
/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8 byte blocks.
 */
static void sf_txeof(sc)
        struct sf_softc *sc;
{
        int txcons, cmpprodidx, cmpconsidx;
        struct sf_tx_cmpdesc_type1 *cur_cmp;
        struct sf_tx_bufdesc_type0 *cur_tx;
        struct ifnet *ifp;

        ifp = &sc->arpcom.ac_if;

        txcons = csr_read_4(sc, SF_CQ_CONSIDX);
        cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
        cmpconsidx = SF_IDX_HI(txcons);

        while (cmpconsidx != cmpprodidx) {
                cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
                cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];

                if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
                        ifp->if_opackets++;
                else {
                        if (cur_cmp->sf_txstat & SF_TXSTAT_TX_UNDERRUN)
                                sf_txthresh_adjust(sc);
                        ifp->if_oerrors++;
                }

                sc->sf_tx_cnt--;
                if (cur_tx->sf_mbuf != NULL) {
                        m_freem(cur_tx->sf_mbuf);
                        cur_tx->sf_mbuf = NULL;
                } else
                        break;
                SF_INC(cmpconsidx, SF_TX_CLIST_CNT);
        }

        ifp->if_timer = 0;
        ifp->if_flags &= ~IFF_OACTIVE;

        csr_write_4(sc, SF_CQ_CONSIDX,
            (txcons & ~SF_CQ_CONSIDX_TXQ) |
            ((cmpconsidx << 16) & 0xFFFF0000));

        return;
}

static void sf_txthresh_adjust(sc)
        struct sf_softc *sc;
{
        u_int32_t txfctl;
        u_int8_t txthresh;

        txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
        txthresh = txfctl & SF_TXFRMCTL_TXTHRESH;
        if (txthresh < 0xFF) {
                txthresh++;
                txfctl &= ~SF_TXFRMCTL_TXTHRESH;
                txfctl |= txthresh;
#ifdef DIAGNOSTIC
                printf("sf%d: tx underrun, increasing "
                    "tx threshold to %d bytes\n",
                    sc->sf_unit, txthresh * 4);
#endif
                csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
        }

        return;
}

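/*
 * Interrupt handler. The shadow ISR is consulted first, presumably so
 * that a shared interrupt which was not raised by our chip can be
 * dismissed without touching the main ISR. Interrupts are then masked
 * via SF_IMR while the handler loops, acknowledging each batch of
 * status bits by writing them back to SF_ISR, until no interesting
 * (SF_INTRS) bits remain.
 */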
static void sf_intr(arg)
        void *arg;
{
        struct sf_softc *sc;
        struct ifnet *ifp;
        u_int32_t status;

        sc = arg;
        ifp = &sc->arpcom.ac_if;

        if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED))
                return;

        /* Disable interrupts. */
        csr_write_4(sc, SF_IMR, 0x00000000);

        for (;;) {
                status = csr_read_4(sc, SF_ISR);
                if (status)
                        csr_write_4(sc, SF_ISR, status);

                if (!(status & SF_INTRS))
                        break;

                if (status & SF_ISR_RXDQ1_DMADONE)
                        sf_rxeof(sc);

                if (status & SF_ISR_TX_TXDONE ||
                    status & SF_ISR_TX_DMADONE ||
                    status & SF_ISR_TX_QUEUEDONE)
                        sf_txeof(sc);

                if (status & SF_ISR_TX_LOFIFO)
                        sf_txthresh_adjust(sc);

                if (status & SF_ISR_ABNORMALINTR) {
                        if (status & SF_ISR_STATSOFLOW) {
                                untimeout(sf_stats_update, sc,
                                    sc->sf_stat_ch);
                                sf_stats_update(sc);
                        } else
                                sf_init(sc);
                }
        }

        /* Re-enable interrupts. */
        csr_write_4(sc, SF_IMR, SF_INTRS);

        if (ifp->if_snd.ifq_head != NULL)
                sf_start(ifp);

        return;
}

static void sf_init(xsc)
        void *xsc;
{
        struct sf_softc *sc;
        struct ifnet *ifp;
        struct mii_data *mii;
        int i, s;

        s = splimp();

        sc = xsc;
        ifp = &sc->arpcom.ac_if;
        mii = device_get_softc(sc->sf_miibus);

        sf_stop(sc);
        sf_reset(sc);

        /* Init all the receive filter registers */
        for (i = SF_RXFILT_PERFECT_BASE;
            i < (SF_RXFILT_HASH_MAX + 1); i += 4)
                csr_write_4(sc, i, 0);

        /* Empty stats counter registers. */
        for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++)
                csr_write_4(sc, SF_STATS_BASE +
                    (i * sizeof(u_int32_t)), 0);

        /* Init our MAC address */
        csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
        csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
        sf_setperf(sc, 0, (caddr_t)&sc->arpcom.ac_enaddr);

        if (sf_init_rx_ring(sc) == ENOBUFS) {
                printf("sf%d: initialization failed: no "
                    "memory for rx buffers\n", sc->sf_unit);
                (void)splx(s);
                return;
        }

        sf_init_tx_ring(sc);

        csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN);

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC) {
                SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
        } else {
                SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
        }

        if (ifp->if_flags & IFF_BROADCAST) {
                SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
        } else {
                SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
        }

        /*
         * Load the multicast filter.
         */
        sf_setmulti(sc);

        /* Init the completion queue indexes */
        csr_write_4(sc, SF_CQ_CONSIDX, 0);
        csr_write_4(sc, SF_CQ_PRODIDX, 0);

        /* Init the RX completion queue */
        csr_write_4(sc, SF_RXCQ_CTL_1,
            vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR);
        SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3);

        /* Init RX DMA control. */
        SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS);

        /* Init the RX buffer descriptor queue. */
        csr_write_4(sc, SF_RXDQ_ADDR_Q1,
            vtophys(sc->sf_ldata->sf_rx_dlist_big));
        csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES);
        csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);

        /* Init the TX completion queue */
        csr_write_4(sc, SF_TXCQ_CTL,
            vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR);

        /* Init the TX buffer descriptor queue. */
        csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO,
            vtophys(sc->sf_ldata->sf_tx_dlist));
        SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX);
        csr_write_4(sc, SF_TXDQ_CTL,
            SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES);
        SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP);

        /* Enable autopadding of short TX frames. */
        SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);

        /* Enable interrupts. */
        csr_write_4(sc, SF_IMR, SF_INTRS);
        SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);

        /* Enable the RX and TX engines. */
        SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB);
        SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB);

        /*mii_mediachg(mii);*/
        sf_ifmedia_upd(ifp);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        sc->sf_stat_ch = timeout(sf_stats_update, sc, hz);

        splx(s);

        return;
}

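/*
 * Encapsulate an mbuf chain in a type0 transmit buffer descriptor by
 * filling in one fragment pointer per non-empty mbuf. If the chain
 * has more than SF_MAXFRAGS fragments, the remainder is coalesced by
 * copying the whole packet into a single freshly allocated mbuf (or
 * cluster) and transmitting that instead.
 */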
static int sf_encap(sc, c, m_head)
        struct sf_softc *sc;
        struct sf_tx_bufdesc_type0 *c;
        struct mbuf *m_head;
{
        int frag = 0;
        struct sf_frag *f = NULL;
        struct mbuf *m;

        m = m_head;

        for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
                if (m->m_len != 0) {
                        if (frag == SF_MAXFRAGS)
                                break;
                        f = &c->sf_frags[frag];
                        if (frag == 0)
                                f->sf_pktlen = m_head->m_pkthdr.len;
                        f->sf_fraglen = m->m_len;
                        f->sf_addr = vtophys(mtod(m, vm_offset_t));
                        frag++;
                }
        }

        if (m != NULL) {
                struct mbuf *m_new = NULL;

                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        printf("sf%d: no memory for tx list\n", sc->sf_unit);
                        return(1);
                }

                if (m_head->m_pkthdr.len > MHLEN) {
                        MCLGET(m_new, M_DONTWAIT);
                        if (!(m_new->m_flags & M_EXT)) {
                                m_freem(m_new);
                                printf("sf%d: no memory for tx list\n",
                                    sc->sf_unit);
                                return(1);
                        }
                }
                m_copydata(m_head, 0, m_head->m_pkthdr.len,
                    mtod(m_new, caddr_t));
                m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
                m_freem(m_head);
                m_head = m_new;
                f = &c->sf_frags[0];
                f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len;
                f->sf_addr = vtophys(mtod(m_head, caddr_t));
                frag = 1;
        }

        c->sf_mbuf = m_head;
        c->sf_id = SF_TX_BUFDESC_ID;
        c->sf_fragcnt = frag;
        c->sf_intr = 1;
        c->sf_caltcp = 0;
        c->sf_crcen = 1;

        return(0);
}

static void sf_start(ifp)
        struct ifnet *ifp;
{
        struct sf_softc *sc;
        struct sf_tx_bufdesc_type0 *cur_tx = NULL;
        struct mbuf *m_head = NULL;
        int i, txprod;

        sc = ifp->if_softc;

        if (!sc->sf_link && ifp->if_snd.ifq_len < 10)
                return;

        if (ifp->if_flags & IFF_OACTIVE)
                return;

        txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
        i = SF_IDX_HI(txprod) >> 4;

        if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) {
                printf("sf%d: TX ring full, resetting\n", sc->sf_unit);
                sf_init(sc);
                txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
                i = SF_IDX_HI(txprod) >> 4;
        }

        while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) {
                if (sc->sf_tx_cnt >= (SF_TX_DLIST_CNT - 5)) {
                        ifp->if_flags |= IFF_OACTIVE;
                        cur_tx = NULL;
                        break;
                }
                IF_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                cur_tx = &sc->sf_ldata->sf_tx_dlist[i];
                if (sf_encap(sc, cur_tx, m_head)) {
                        IF_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_flags |= IFF_OACTIVE;
                        cur_tx = NULL;
                        break;
                }

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp, m_head);

                SF_INC(i, SF_TX_DLIST_CNT);
                sc->sf_tx_cnt++;
                /*
                 * Don't let the TX DMA queue get too full.
                 */
1389 */ 1390 if (sc->sf_tx_cnt > 64) 1391 break; 1392 } 1393 1394 if (cur_tx == NULL) 1395 return; 1396 1397 /* Transmit */ 1398 csr_write_4(sc, SF_TXDQ_PRODIDX, 1399 (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) | 1400 ((i << 20) & 0xFFFF0000)); 1401 1402 ifp->if_timer = 5; 1403 1404 return; 1405 } 1406 1407 static void sf_stop(sc) 1408 struct sf_softc *sc; 1409 { 1410 int i; 1411 struct ifnet *ifp; 1412 1413 ifp = &sc->arpcom.ac_if; 1414 1415 untimeout(sf_stats_update, sc, sc->sf_stat_ch); 1416 1417 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 1418 csr_write_4(sc, SF_CQ_CONSIDX, 0); 1419 csr_write_4(sc, SF_CQ_PRODIDX, 0); 1420 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 1421 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 1422 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 1423 csr_write_4(sc, SF_TXCQ_CTL, 0); 1424 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 1425 csr_write_4(sc, SF_TXDQ_CTL, 0); 1426 sf_reset(sc); 1427 1428 sc->sf_link = 0; 1429 1430 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1431 if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) { 1432 m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf); 1433 sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL; 1434 } 1435 } 1436 1437 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1438 if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { 1439 m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf); 1440 sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL; 1441 } 1442 } 1443 1444 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); 1445 1446 return; 1447 } 1448 1449 /* 1450 * Note: it is important that this function not be interrupted. We 1451 * use a two-stage register access scheme: if we are interrupted in 1452 * between setting the indirect address register and reading from the 1453 * indirect data register, the contents of the address register could 1454 * be changed out from under us. 1455 */ 1456 static void sf_stats_update(xsc) 1457 void *xsc; 1458 { 1459 struct sf_softc *sc; 1460 struct ifnet *ifp; 1461 struct mii_data *mii; 1462 struct sf_stats stats; 1463 u_int32_t *ptr; 1464 int i, s; 1465 1466 s = splimp(); 1467 1468 sc = xsc; 1469 ifp = &sc->arpcom.ac_if; 1470 mii = device_get_softc(sc->sf_miibus); 1471 1472 ptr = (u_int32_t *)&stats; 1473 for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) 1474 ptr[i] = csr_read_4(sc, SF_STATS_BASE + 1475 (i + sizeof(u_int32_t))); 1476 1477 for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) 1478 csr_write_4(sc, SF_STATS_BASE + 1479 (i + sizeof(u_int32_t)), 0); 1480 1481 ifp->if_collisions += stats.sf_tx_single_colls + 1482 stats.sf_tx_multi_colls + stats.sf_tx_excess_colls; 1483 1484 mii_tick(mii); 1485 if (!sc->sf_link) { 1486 mii_pollstat(mii); 1487 if (mii->mii_media_status & IFM_ACTIVE && 1488 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1489 sc->sf_link++; 1490 if (ifp->if_snd.ifq_head != NULL) 1491 sf_start(ifp); 1492 } 1493 1494 sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); 1495 1496 splx(s); 1497 1498 return; 1499 } 1500 1501 static void sf_watchdog(ifp) 1502 struct ifnet *ifp; 1503 { 1504 struct sf_softc *sc; 1505 1506 sc = ifp->if_softc; 1507 1508 ifp->if_oerrors++; 1509 printf("sf%d: watchdog timeout\n", sc->sf_unit); 1510 1511 sf_stop(sc); 1512 sf_reset(sc); 1513 sf_init(sc); 1514 1515 if (ifp->if_snd.ifq_head != NULL) 1516 sf_start(ifp); 1517 1518 return; 1519 } 1520 1521 static void sf_shutdown(dev) 1522 device_t dev; 1523 { 1524 struct sf_softc *sc; 1525 1526 sc = device_get_softc(dev); 1527 1528 sf_stop(sc); 1529 1530 return; 1531 } 1532