/*	$OpenBSD: gem.c,v 1.10 2001/10/02 15:24:09 jason Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <vm/vm.h>
#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start __P((struct ifnet *));
void		gem_stop __P((struct ifnet *, int));
int		gem_ioctl __P((struct ifnet *, u_long, caddr_t));
void		gem_tick __P((void *));
void		gem_watchdog __P((struct ifnet *));
void		gem_shutdown __P((void *));
int		gem_init __P((struct ifnet *));
void		gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
int		gem_meminit __P((struct gem_softc *));
void		gem_mifinit __P((struct gem_softc *));
void		gem_reset __P((struct gem_softc *));
int		gem_reset_rx(struct gem_softc *sc);
int		gem_reset_tx(struct gem_softc *sc);
int		gem_disable_rx(struct gem_softc *sc);
int		gem_disable_tx(struct gem_softc *sc);
void		gem_rxdrain(struct gem_softc *sc);
int		gem_add_rxbuf(struct gem_softc *sc, int idx);
void		gem_setladrf __P((struct gem_softc *));

/* MII methods & callbacks */
static int	gem_mii_readreg __P((struct device *, int, int));
static void	gem_mii_writereg __P((struct device *, int, int, int));
static void	gem_mii_statchg __P((struct device *));

int		gem_mediachange __P((struct ifnet *));
void		gem_mediastatus __P((struct ifnet *, struct ifmediareq *));

struct mbuf	*gem_get __P((struct gem_softc *, int, int));
int		gem_put __P((struct gem_softc *, int, struct mbuf *));
void		gem_read __P((struct gem_softc *, int, int));
int		gem_eint __P((struct gem_softc *, u_int));
int		gem_rint __P((struct gem_softc *));
int		gem_tint __P((struct gem_softc *));
void		gem_power __P((int, void *));

static int	ether_cmp __P((u_char *, u_char *));

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif


/*
 * gem_config:
 *
 *	Attach a Gem interface to the system.
 */
void
gem_config(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	bcopy(sc->sc_enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, 0,
		    &txs->txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
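
	/*
	 * Note: the control-data setup above is the standard four-step
	 * bus_dma(9) idiom: bus_dmamem_alloc() obtains DMA-safe memory,
	 * bus_dmamem_map() makes it kernel-visible, bus_dmamap_create()
	 * builds a map, and bus_dmamap_load() binds the memory to the
	 * map; each failure unwinds through the matching fail_* label
	 * below.
	 */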
	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	gem_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping.  We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * Now select and activate the PHY we will use.
		 *
		 * The order of preference is External (MDI1),
		 * Internal (MDI0), Serial Link (no MII).
		 */
		if (sc->sc_phys[1]) {
#ifdef DEBUG
			printf("using external phy\n");
#endif
			sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		} else {
#ifdef DEBUG
			printf("using internal phy\n");
#endif
			sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		}
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
		    sc->sc_mif_config);

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("gem_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}


void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splimp();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	int s;

	s = splimp();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) &
		    (GEM_RESET_RX|GEM_RESET_TX)) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) &
	    (GEM_RESET_RX|GEM_RESET_TX)) != 0) {
		printf("%s: cannot reset device\n",
		    sc->sc_dev.dv_xname);
	}
	splx(s);
}


/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* XXX - Should we reset these instead? */
	gem_disable_rx(sc);
	gem_disable_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable) {
		gem_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
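
/*
 * The reset and disable helpers below all follow the same pattern:
 * poke the relevant reset or enable bit, then poll the register for
 * up to TRIES iterations with a 100us delay between reads before
 * declaring failure.
 */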

/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;


	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) != 0)
		printf("%s: cannot disable rx dma\n",
		    sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) != 0) {
		printf("%s: cannot reset receiver\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) != 0)
		printf("%s: cannot disable tx dma\n",
		    sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) != 0) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_MAC_RX_CONFIG) &
		    GEM_MAC_RX_ENABLE) == 0)
			return (0);
	return (1);
}

/*
 * disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	for (i = TRIES; i--; delay(100))
		if ((bus_space_read_4(t, h, GEM_MAC_TX_CONFIG) &
		    GEM_MAC_TX_ENABLE) == 0)
			return (0);
	return (1);
}

/*
 * Initialize interface.
 */
int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(int sz)
{
	int v;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		/*
		 * Fall back to the smallest encoding rather than
		 * returning an uninitialized value.
		 */
		v = GEM_RING_SZ_32;
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}
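
/*
 * For example, with (hypothetical) ring sizes GEM_NTXDESC == 1024 and
 * GEM_NRXDESC == 128, gem_init() below programs GEM_TX_CONFIG with
 * gem_ringsize(1024) == GEM_RING_SZ_1024 and GEM_RX_CONFIG with
 * gem_ringsize(128) == GEM_RING_SZ_128; the encodings simply select
 * one of the nine power-of-two ring sizes the chip supports.
 */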

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splimp();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	v = (GEM_MTU) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_8(t, h, GEM_TX_RING_PTR,
	    GEM_CDTXADDR(sc, 0));
	/* Yeeech. The following has endianness issues. */
	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc, 0))>>32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO,
	    GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0);		/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
	bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);

	/* step 10. ERX Configuration */

	/* Encode the Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 15.5 Kbytes
	 * and an ON Threshold of 4K bytes.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 0xf8 | (0x40 << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (2<<12)|6);

	/* step 11. Configure Media */
	gem_mii_statchg(&sc->sc_dev);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);


	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and unrolled for
 * speed.
 */
static __inline__ int
ether_cmp(a, b)
	u_char *a, *b;
{

	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
		return (0);
	return (1);
}


void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {

		/* Wooo. Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = (GEM_MTU) | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);
		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_enaddr[5]<<8)|sc->sc_enaddr[4])&0x3ff);
		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_enaddr[4]<<8) | sc->sc_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_enaddr[2]<<8) | sc->sc_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_enaddr[0]<<8) | sc->sc_enaddr[1]);

}



void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0, *m;
	struct gem_txsoft *txs, *last_txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	    sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmatag, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		    seg < dmamap->dm_nsegs;
		    seg++, nexttx = GEM_NEXTTX(nexttx)) {
			uint64_t flags;

			/*
			 * If this is the first descriptor we're
			 * enqueueing for this packet, set the start of
			 * packet flag, and the checksum stuff if we want
			 * the hardware to do it.  (The original test
			 * against firsttx only marked the first packet
			 * of each gem_start() call.)
			 */
			sc->sc_txdescs[nexttx].gd_addr =
			    GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
			flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
			if (nexttx == sc->sc_txnext)
				flags |= GEM_TD_START_OF_PACKET;
			if (seg == dmamap->dm_nsegs - 1)
				flags |= GEM_TD_END_OF_PACKET;
			sc->sc_txdescs[nexttx].gd_flags =
			    GEM_DMA_WRITE(sc, flags);
			lasttx = nexttx;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" gem_start %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;
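
		/*
		 * Example (hypothetical numbers): a packet that mapped
		 * to three DMA segments starting at sc_txnext == 10
		 * occupies descriptors 10, 11 and 12; descriptor 10
		 * carries GEM_TD_START_OF_PACKET, descriptor 12
		 * GEM_TD_END_OF_PACKET, and txs records firstdesc 10,
		 * lastdesc 12, ndescs 3.
		 */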

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    sc->sc_dev.dv_xname, nexttx));
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    sc->sc_dev.dv_xname, ifp->if_timer));
	}
}

/*
 * Transmit interrupt.
 */
int
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;


	DPRINTF(sc, ("%s: gem_tint\n", sc->sc_dev.dv_xname));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_firstdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
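		/*
		 * The in-flight test below must account for ring
		 * wraparound.  For example (hypothetical numbers), a
		 * packet spanning descriptors 1020..3 of a 1024-entry
		 * ring has firstdesc > lastdesc, so a completion value
		 * of 1022 or 2 means the packet is still in flight,
		 * while 4..1019 means it has been processed.
		 */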
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

#ifdef DIAGNOSTIC
		if (txs->txs_mbuf == NULL) {
			panic("gem_txintr: null mbuf");
		}
#endif

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    (long long)bus_space_read_8(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));

	gem_start(ifp);

	if (SIMPLEQ_FIRST(&sc->sc_txdirtyq) == NULL)
		ifp->if_timer = 0;
	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    sc->sc_dev.dv_xname, ifp->if_timer));

	return (1);
}

/*
 * Receive interrupt.
 */
int
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	DPRINTF(sc, ("%s: gem_rint: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));
	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION);
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			printf("gem_rint: completed descriptor "
			    "still owned %d\n", i);
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}


/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}


int
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{
	if ((status & GEM_INTR_MIF) != 0) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}


int
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
	    sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("MAC tx fault, status %x\n", txstat);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("MAC rx fault, status %x\n", rxstat);
	}
	return (r);
}


void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
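
/*
 * The frame-mode helpers below assemble the standard MII management
 * frame (opcode, PHY address and register address fields, plus the
 * data word for a write) into GEM_MIF_FRAME in a single store, then
 * poll the turnaround bit GEM_MIF_FRAME_TA0 for up to 100us; once the
 * chip sets that bit, the data field of a read frame holds the result.
 */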
static int
gem_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG1
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

static void
gem_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG1
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

static void
gem_mii_statchg(dev)
	struct device *dev;
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif


	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;
	} else
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int gem_ioctldebug = 0;
/*
 * Process an ioctl request.
 */
int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			gem_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			gem_init(ifp);
			break;
		    }
#endif
		default:
			gem_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			gem_stop(ifp, 1);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			gem_init(ifp);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*gem_stop(sc);*/
			gem_init(ifp);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
1739 */ 1740 gem_setladrf(sc); 1741 error = 0; 1742 } 1743 break; 1744 1745 case SIOCGIFMEDIA: 1746 case SIOCSIFMEDIA: 1747 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 1748 break; 1749 1750 default: 1751 error = EINVAL; 1752 break; 1753 } 1754 1755 splx(s); 1756 return (error); 1757 } 1758 1759 1760 void 1761 gem_shutdown(arg) 1762 void *arg; 1763 { 1764 struct gem_softc *sc = (struct gem_softc *)arg; 1765 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1766 1767 gem_stop(ifp, 1); 1768 } 1769 1770 /* 1771 * Set up the logical address filter. 1772 */ 1773 void 1774 gem_setladrf(sc) 1775 struct gem_softc *sc; 1776 { 1777 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1778 struct ether_multi *enm; 1779 struct ether_multistep step; 1780 struct arpcom *ac = &sc->sc_arpcom; 1781 bus_space_tag_t t = sc->sc_bustag; 1782 bus_space_handle_t h = sc->sc_h; 1783 u_char *cp; 1784 u_int32_t crc; 1785 u_int32_t hash[16]; 1786 u_int32_t v; 1787 int len; 1788 1789 /* Clear hash table */ 1790 memset(hash, 0, sizeof(hash)); 1791 1792 /* Get current RX configuration */ 1793 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1794 1795 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1796 /* Turn on promiscuous mode; turn off the hash filter */ 1797 v |= GEM_MAC_RX_PROMISCUOUS; 1798 v &= ~GEM_MAC_RX_HASH_FILTER; 1799 ifp->if_flags |= IFF_ALLMULTI; 1800 goto chipit; 1801 } 1802 1803 /* Turn off promiscuous mode; turn on the hash filter */ 1804 v &= ~GEM_MAC_RX_PROMISCUOUS; 1805 v |= GEM_MAC_RX_HASH_FILTER; 1806 1807 /* 1808 * Set up multicast address filter by passing all multicast addresses 1809 * through a crc generator, and then using the high order 6 bits as an 1810 * index into the 256 bit logical address filter. The high order bit 1811 * selects the word, while the rest of the bits select the bit within 1812 * the word. 1813 */ 1814 1815 ETHER_FIRST_MULTI(step, ac, enm); 1816 while (enm != NULL) { 1817 if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) { 1818 /* 1819 * We must listen to a range of multicast addresses. 1820 * For now, just accept all multicasts, rather than 1821 * trying to set only those filter bits needed to match 1822 * the range. (At this time, the only use of address 1823 * ranges is for IP multicast routing, for which the 1824 * range is big enough to require all bits set.) 1825 */ 1826 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1827 ifp->if_flags |= IFF_ALLMULTI; 1828 goto chipit; 1829 } 1830 1831 cp = enm->enm_addrlo; 1832 crc = 0xffffffff; 1833 for (len = sizeof(enm->enm_addrlo); --len >= 0;) { 1834 int octet = *cp++; 1835 int i; 1836 1837 #define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ 1838 for (i = 0; i < 8; i++) { 1839 if ((crc & 1) ^ (octet & 1)) { 1840 crc >>= 1; 1841 crc ^= MC_POLY_LE; 1842 } else { 1843 crc >>= 1; 1844 } 1845 octet >>= 1; 1846 } 1847 } 1848 /* Just want the 8 most significant bits. */ 1849 crc >>= 24; 1850 1851 /* Set the corresponding bit in the filter. 

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]);
	bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]);
	bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]);
	bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]);
	bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]);
	bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]);
	bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]);
	bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]);
	bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]);
	bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]);
	bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]);
	bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]);
	bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]);
	bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]);
	bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]);
	bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]);

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}