/*	$NetBSD: aic6915.c,v 1.34 2016/12/15 09:28:05 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.34 2016/12/15 09:28:05 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static bool	sf_shutdown(device_t, int);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(device_t, int, int);
static void	sf_mii_write(device_t, int, int, int);
static void	sf_mii_statchg(struct ifnet *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

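/*
 * When the chip is mapped in PCI I/O space, registers are reached
 * indirectly: the target offset is written to SF_IndirectIoAccess and
 * the data is moved through SF_IndirectIoDataPort (the I/O window is
 * evidently too small to map the whole register space directly).
 * Memory-mapped configurations access the registers directly.
 */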
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to sub-region "
			    "functional registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load control data "
		    "DMA map, error = %d\n", error);
		goto fail_3;
	}
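	/*
	 * The descriptor rings and both completion queues all live
	 * inside the single sf_control_data allocation loaded above,
	 * so the one sc_cddmamap covers every ring.
	 */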
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n",
		    device_xname(sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
	struct sf_softc *sc;

	sc = device_private(self);
	sf_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer, error = %d\n",
				    error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
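		/*
		 * The last descriptor in the ring gets the END bit set,
		 * so the chip wraps back to entry 0.
		 */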
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(sc->sc_dev,
				    "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}
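/*
 * The Starfire reports transmit and receive completions through
 * dedicated completion rings rather than through status bits in the
 * descriptors themselves; the two helpers below walk those rings.
 */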
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(sc->sc_dev,
			    "Tx queue mismatch, index %d\n", txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

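	/*
	 * The chip's statistics block is read as an array of 32-bit
	 * counters (struct sf_stats mirrors the register layout), and
	 * each counter is cleared after it is read so that the next
	 * pass sees only new events.
	 */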
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
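	/*
	 * Note: completion queue type 3 is used here to match the
	 * full-sized entries (struct sf_rcd_full) initialized above.
	 */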
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

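/*
 * sf_set_filter_perfect:
 *
 *	Set an entry in the perfect (station address) filter table.
 */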
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

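/*
 * sf_set_filter_hash:
 *
 *	Set the hash table bit for the given multicast address.  The
 *	upper 9 bits of the big-endian CRC of the address select one
 *	of 512 bits: bits 8..4 pick the 16-bit table entry and bits
 *	3..0 pick the bit within it.
 */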
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(device_t self, int phy, int reg)
{
	struct sf_softc *sc = device_private(self);
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(device_t self, int phy, int reg, int val)
{
	struct sf_softc *sc = device_private(self);
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}