/* $NetBSD: aic6915.c,v 1.45 2022/09/25 18:43:32 thorpej Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.45 2022/09/25 18:43:32 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int, uint16_t *);
static int	sf_mii_write(device_t, int, int, uint16_t);
static void	sf_mii_statchg(struct ifnet *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
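/*
 * Register access helpers.  When the chip is I/O mapped
 * (sc->sc_iomapped), accesses are bounced through the
 * IndirectIoAccess / IndirectIoDataPort register pair -- write the
 * target register offset, then read or write the data port --
 * presumably because the full register file is larger than a
 * practical I/O window.  Memory-mapped configurations hit the
 * register file directly.  For example, a general register read
 *
 *	v = sf_genreg_read(sc, SF_MacConfig1);
 *
 * expands to sf_reg_read(sc, SF_MacConfig1 + SF_GENREG_OFFSET).
 */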
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	int i, rseg, error;
	bus_dma_segment_t seg;
	uint8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);
	callout_setfunc(&sc->sc_tick_callout, sf_tick, sc);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data "
		    "DMA map, error = %d\n", error);
		goto fail_3;
	}
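	/*
	 * At this point the sf_control_data block -- which carries the
	 * Tx/Rx descriptor rings and completion queues (see
	 * aic6915var.h) in a single physically contiguous, page-aligned
	 * allocation -- is wired for DMA.  Each of the four setup steps
	 * above has a matching fail_* unwind label at the bottom of
	 * sf_attach(), executed in reverse order on failure.
	 */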
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = sf_mii_read;
	mii->mii_writereg = sf_mii_write;
	mii->mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}
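/*
 * Transmit path overview (a summary of the code below, not chip
 * documentation): sf_start() builds type-0 (``ID'') transmit
 * descriptors whose fragment lists come straight from the DMA map
 * segments, hands new descriptors to the chip by advancing the
 * high-priority producer index in TxDescQueueProducerIndex, and
 * requests a completion interrupt only on the last descriptor of
 * each burst (TD_W0_INTR).  sf_txintr() later walks the Tx
 * completion queue and unmaps/frees the corresponding mbufs.
 */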
/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
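/*
 * Note that sf_start() stops at SF_NTXDESC - 1 pending descriptors:
 * keeping one slot free is the usual ring discipline for avoiding
 * the full-versus-empty ambiguity (the comment above calls it
 * ``sanity's sake'').  The 5 second if_timer armed there is cleared
 * by sf_txintr() once the ring drains; if it expires instead,
 * sf_watchdog() below assumes the chip has wedged and recovers with
 * a full sf_init().
 */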
/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt | IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(sc->sc_dev,
				    "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}
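/*
 * Completion-queue handling below: the chip posts one completion
 * descriptor per event into a ring in host memory and advances its
 * producer index; the driver walks from its consumer index up to
 * that producer, then writes the new consumer index back to the
 * CompletionQueueConsumerIndex register.  Each Tx completion carries
 * the index of the transmit descriptor it retires (TCD_INDEX), which
 * SF_TCD_INDEX_TO_HOST() maps back to the software state.
 */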
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}
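/*
 * The receive path uses the same consumer/producer walk over the Rx
 * completion queue.  The chip requires 4-byte aligned receive
 * buffers, which leaves the payload after the 14-byte Ethernet
 * header misaligned; on strict-alignment machines the data is
 * therefore copied into a fresh mbuf offset by 2 bytes so the IP
 * header lands on a 4-byte boundary, while __NO_STRICT_ALIGNMENT
 * machines hand the filled cluster up directly and attach a new one
 * to the descriptor via sf_add_rxbuf().
 */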
/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			if_statinc(ifp, if_ierrors);
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_schedule(&sc->sc_tick_callout, hz);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(nsr, if_opackets, stats.TransmitOKFrames);

	if_statadd_ref(nsr, if_collisions,
	    stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames);

	if_statadd_ref(nsr, if_oerrors,
	    stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors);

	if_statadd_ref(nsr, if_ierrors,
	    stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors);

	IF_STAT_PUTREF(ifp);
}
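/*
 * Reset strategy, as implemented below: DMA and the Tx/Rx engines
 * are quiesced first by clearing GeneralEthernetCtrl, the MAC core
 * is reset separately via MacConfig1, and then PDC_SoftReset resets
 * the rest of the chip.  The bit self-clears when the reset
 * completes, so it is polled for up to 10ms (1000 * 10us) before
 * giving up.
 */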
/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));
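	/*
	 * sc_txthresh is in units of 16 bytes (the starting value of 10
	 * set in sf_attach() means 160 bytes); both the frame CSR above
	 * and the high-priority FIFO threshold below are derived from
	 * it, and sf_intr() raises it whenever the chip reports a
	 * transmit FIFO underrun.
	 */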
	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn | GEC_RxDmaEn | GEC_TransmitEn | GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_schedule(&sc->sc_tick_callout, hz);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		printf("%s: interface not running\n",
		    device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}
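/*
 * Teardown order in sf_stop() mirrors sf_init(): stop the tick
 * callout and the MII first, then mask interrupts and halt the Tx/Rx
 * engines before any mbufs are freed, so the chip can no longer DMA
 * into buffers being released.  Rx buffers are drained only when
 * `disable' is set, since sf_init() can re-use the mbufs still
 * attached to the descriptors.
 */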
/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}
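/*
 * Multicast hashing above: the top 9 bits of the big-endian CRC32 of
 * the address select one of 512 filter bits, organized as 32 rows
 * (hash >> 4, each row at a 0x10 stride from SF_HASH_BASE) of 16 bits
 * (hash & 0xf).  The perfect filter stores each station address as
 * three 16-bit words, low-order bytes first; slot 0 is loaded with
 * the interface's own address by sf_set_filter() below.
 */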
/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		goto done;
	}
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg, uint16_t *data)
{
	struct sf_softc *sc = device_private(self);
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return -1;

	if (MiiRegDataPort(v) == 0xffff)
		return -1;

	*data = MiiRegDataPort(v);
	return 0;
}
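/*
 * The Starfire exposes MII access as a flat register file rather
 * than a command/data pair: every (phy, reg) combination has its own
 * 32-bit register located by SF_MII_PHY_REG().  A read above simply
 * polls that register until MiiDataValid is set; a write below posts
 * the value and polls MiiBusy until the shift-out completes, each
 * bounded at roughly 1ms (1000 * 1us).
 */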
/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static int
sf_mii_write(device_t self, int phy, int reg, uint16_t val)
{
	struct sf_softc *sc = device_private(self);
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return 0;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
	return ETIMEDOUT;
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}