/*	$NetBSD: aic6915.c,v 1.9 2002/12/21 16:15:28 kristerw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.9 2002/12/21 16:15:28 kristerw Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

void	sf_start(struct ifnet *);
void	sf_watchdog(struct ifnet *);
int	sf_ioctl(struct ifnet *, u_long, caddr_t);
int	sf_init(struct ifnet *);
void	sf_stop(struct ifnet *, int);

void	sf_shutdown(void *);

void	sf_txintr(struct sf_softc *);
void	sf_rxintr(struct sf_softc *);
void	sf_stats_update(struct sf_softc *);

void	sf_reset(struct sf_softc *);
void	sf_macreset(struct sf_softc *);
void	sf_rxdrain(struct sf_softc *);
int	sf_add_rxbuf(struct sf_softc *, int);
uint8_t	sf_read_eeprom(struct sf_softc *, int);
void	sf_set_filter(struct sf_softc *);

int	sf_mii_read(struct device *, int, int);
void	sf_mii_write(struct device *, int, int, int);
void	sf_mii_statchg(struct device *);

void	sf_tick(void *);

int	sf_mediachange(struct ifnet *);
void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

static __inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static __inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

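/*
 * Illustrative sketch (not compiled): what a general-register access
 * costs in the two mapping modes handled above.  In the memory-mapped
 * case sf_genreg_read() is a single bus read; in the I/O-mapped case
 * the register offset is first written to SF_IndirectIoAccess and the
 * data is then transferred through SF_IndirectIoDataPort, exactly as
 * in sf_reg_read()/sf_reg_write().
 */
#if 0
	/* memory mapped: one access */
	val = bus_space_read_4(sc->sc_st, sc->sc_sh, SF_GENREG_OFFSET + reg);

	/* I/O mapped: two accesses through the indirect window */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
	    SF_GENREG_OFFSET + reg);
	val = bus_space_read_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort);
#endif
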
/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE,
		    &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

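	/*
	 * (As the loop above shows, the station address is stored
	 * byte-reversed at EEPROM offsets 15..20: enaddr[0] comes from
	 * offset 20 and enaddr[5] from offset 15.)
	 */
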
	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

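/*
 * A note on the queueing discipline in sf_start() below: each packet
 * is IFQ_POLL()'d first and only IFQ_DEQUEUE()'d once its DMA map has
 * been loaded successfully, so on a transient mbuf or map shortage
 * the packet stays on the send queue and is retried on a later call.
 */
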
/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			sf_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

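/*
 * A note on the underrun recovery in sf_intr() above: sc_txthresh is
 * kept in the chip's 16-byte units and is bumped by one unit (capped
 * at 0xff) on each Tx FIFO underrun, which is why the diagnostic
 * message reports sc_txthresh * 16 bytes.
 */
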
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
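		/*
		 * (ETHER_HDR_LEN is 14, so placing the frame at offset 2
		 * ends the Ethernet header at offset 16, leaving the IP
		 * header that follows it 4-byte aligned.)
		 */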
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

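/*
 * Note on sf_stats_update() above: walking the counters with a
 * uint32_t pointer starting at &stats.TransmitOKFrames assumes that
 * struct sf_stats is laid out as consecutive uint32_t counters in
 * register order starting at SF_STATS_BASE; each counter is cleared
 * by writing 0 back after it is read.
 */
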
/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

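/*
 * Sketch of the hash computation in sf_set_filter_hash() above (an
 * illustration in this file's terms, not extra driver code): the
 * big-endian CRC-32 of the address is reduced to 9 bits; the upper
 * 5 bits select one of 32 hash-table words (spaced 0x10 apart in
 * register space) and the lower 4 bits select the bit within it.
 */
#if 0
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23; /* 9 bits */
	slot = hash >> 4;	/* word index, 0..31 */
	bit  = hash & 0xf;	/* bit within that word */
#endif
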
/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

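/*
 * Note on sf_mii_read() above: it returns 0 both when MiiDataValid
 * never asserts within the polling loop and when the data port reads
 * back as 0xffff (no PHY responding at that address), so the MII
 * layer treats such slots as empty.
 */
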
/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}