1 /* $OpenBSD: gem.c,v 1.30 2003/01/23 22:55:52 jason Exp $ */ 2 /* $NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */ 3 4 /* 5 * 6 * Copyright (C) 2001 Eduardo Horvath. 7 * All rights reserved. 8 * 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 */ 32 33 /* 34 * Driver for Sun GEM ethernet controllers. 
35 */ 36 37 #include "bpfilter.h" 38 #include "vlan.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/timeout.h> 43 #include <sys/mbuf.h> 44 #include <sys/syslog.h> 45 #include <sys/malloc.h> 46 #include <sys/kernel.h> 47 #include <sys/socket.h> 48 #include <sys/ioctl.h> 49 #include <sys/errno.h> 50 #include <sys/device.h> 51 52 #include <machine/endian.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #include <net/if.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 60 #ifdef INET 61 #include <netinet/in.h> 62 #include <netinet/if_ether.h> 63 #endif 64 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 69 #if NVLAN > 0 70 #include <net/if_vlan_var.h> 71 #endif 72 73 #include <machine/bus.h> 74 #include <machine/intr.h> 75 76 #include <dev/mii/mii.h> 77 #include <dev/mii/miivar.h> 78 #include <dev/mii/mii_bitbang.h> 79 80 #include <dev/ic/gemreg.h> 81 #include <dev/ic/gemvar.h> 82 83 #define TRIES 10000 84 85 struct cfdriver gem_cd = { 86 NULL, "gem", DV_IFNET 87 }; 88 89 void gem_start(struct ifnet *); 90 void gem_stop(struct ifnet *, int); 91 int gem_ioctl(struct ifnet *, u_long, caddr_t); 92 void gem_tick(void *); 93 void gem_watchdog(struct ifnet *); 94 void gem_shutdown(void *); 95 int gem_init(struct ifnet *); 96 void gem_init_regs(struct gem_softc *sc); 97 static int gem_ringsize(int sz); 98 int gem_meminit(struct gem_softc *); 99 void gem_mifinit(struct gem_softc *); 100 void gem_reset(struct gem_softc *); 101 int gem_reset_rx(struct gem_softc *sc); 102 int gem_reset_tx(struct gem_softc *sc); 103 int gem_disable_rx(struct gem_softc *sc); 104 int gem_disable_tx(struct gem_softc *sc); 105 void gem_rxdrain(struct gem_softc *sc); 106 int gem_add_rxbuf(struct gem_softc *sc, int idx); 107 void gem_setladrf(struct gem_softc *); 108 int gem_encap(struct gem_softc *, struct mbuf *, u_int32_t *); 109 110 /* MII methods & callbacks */ 111 static int gem_mii_readreg(struct device *, int, int); 112 static void gem_mii_writereg(struct 
device *, int, int, int);
static void	gem_mii_statchg(struct device *);

int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
int		gem_put(struct gem_softc *, int, struct mbuf *);
void		gem_read(struct gem_softc *, int, int);
int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
void		gem_power(int, void *);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif


/*
 * gem_config:
 *
 *	Attach a Gem interface to the system.  Called once by the
 *	bus-specific front end after sc_bustag/sc_h/sc_dmatag and
 *	sc_enaddr have been filled in.  Allocates and maps the shared
 *	control-data area, creates the receive DMA maps, probes the MII,
 *	and attaches the network interface.  On failure all resources
 *	acquired so far are released via the fail_* unwind labels.
 */
void
gem_config(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;
	struct ifmedia_entry *ifm;

	bcopy(sc->sc_enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Get RX FIFO size (register value is in 64-byte units) */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	gem_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping.  We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * Now select and activate the PHY we will use.
		 *
		 * The order of preference is External (MDI1),
		 * Internal (MDI0), Serial Link (no MII).
		 */
		if (sc->sc_phys[1]) {
#ifdef DEBUG
			printf("using external phy\n");
#endif
			sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		} else {
#ifdef DEBUG
			printf("using internal phy\n");
#endif
			sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		}
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
		    sc->sc_mif_config);

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * If we support GigE media, we support jumbo frames too.
	 * Unless we are Apple.
	 */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
#if 0
			if (sc->sc_variant != GEM_APPLE_GMAC)
				sc->sc_ethercom.ec_capabilities
				    |= ETHERCAP_JUMBO_MTU;
#endif

			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("gem_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
334 */ 335 fail_5: 336 for (i = 0; i < GEM_NRXDESC; i++) { 337 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 338 bus_dmamap_destroy(sc->sc_dmatag, 339 sc->sc_rxsoft[i].rxs_dmamap); 340 } 341 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); 342 fail_3: 343 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap); 344 fail_2: 345 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data, 346 sizeof(struct gem_control_data)); 347 fail_1: 348 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg); 349 fail_0: 350 return; 351 } 352 353 354 void 355 gem_tick(arg) 356 void *arg; 357 { 358 struct gem_softc *sc = arg; 359 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 360 bus_space_tag_t t = sc->sc_bustag; 361 bus_space_handle_t mac = sc->sc_h; 362 int s; 363 364 /* unload collisions counters */ 365 ifp->if_collisions += 366 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 367 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + 368 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 369 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 370 371 /* clear the hardware counters */ 372 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 373 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 374 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 375 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 376 377 s = splimp(); 378 mii_tick(&sc->sc_mii); 379 splx(s); 380 381 timeout_add(&sc->sc_tick_ch, hz); 382 } 383 384 void 385 gem_reset(sc) 386 struct gem_softc *sc; 387 { 388 bus_space_tag_t t = sc->sc_bustag; 389 bus_space_handle_t h = sc->sc_h; 390 int i; 391 int s; 392 393 s = splimp(); 394 DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname)); 395 gem_reset_rx(sc); 396 gem_reset_tx(sc); 397 398 /* Do a full reset */ 399 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX); 400 for (i=TRIES; i--; delay(100)) 401 if ((bus_space_read_4(t, h, GEM_RESET) & 402 (GEM_RESET_RX|GEM_RESET_TX)) == 0) 403 break; 404 if ((bus_space_read_4(t, h, GEM_RESET) & 405 
(GEM_RESET_RX|GEM_RESET_TX)) != 0) { 406 printf("%s: cannot reset device\n", 407 sc->sc_dev.dv_xname); 408 } 409 splx(s); 410 } 411 412 413 /* 414 * gem_rxdrain: 415 * 416 * Drain the receive queue. 417 */ 418 void 419 gem_rxdrain(struct gem_softc *sc) 420 { 421 struct gem_rxsoft *rxs; 422 int i; 423 424 for (i = 0; i < GEM_NRXDESC; i++) { 425 rxs = &sc->sc_rxsoft[i]; 426 if (rxs->rxs_mbuf != NULL) { 427 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 428 m_freem(rxs->rxs_mbuf); 429 rxs->rxs_mbuf = NULL; 430 } 431 } 432 } 433 434 /* 435 * Reset the whole thing. 436 */ 437 void 438 gem_stop(struct ifnet *ifp, int disable) 439 { 440 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 441 struct gem_sxd *sd; 442 u_int32_t i; 443 444 DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname)); 445 446 timeout_del(&sc->sc_tick_ch); 447 mii_down(&sc->sc_mii); 448 449 /* XXX - Should we reset these instead? */ 450 gem_disable_rx(sc); 451 gem_disable_tx(sc); 452 453 /* 454 * Release any queued transmit buffers. 455 */ 456 for (i = 0; i < GEM_NTXDESC; i++) { 457 sd = &sc->sc_txd[i]; 458 if (sd->sd_map != NULL) { 459 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 460 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 461 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 462 bus_dmamap_destroy(sc->sc_dmatag, sd->sd_map); 463 sd->sd_map = NULL; 464 } 465 if (sd->sd_mbuf != NULL) { 466 m_freem(sd->sd_mbuf); 467 sd->sd_mbuf = NULL; 468 } 469 } 470 sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0; 471 472 if (disable) { 473 gem_rxdrain(sc); 474 } 475 476 /* 477 * Mark the interface down and cancel the watchdog timer. 
478 */ 479 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 480 ifp->if_timer = 0; 481 } 482 483 484 /* 485 * Reset the receiver 486 */ 487 int 488 gem_reset_rx(struct gem_softc *sc) 489 { 490 bus_space_tag_t t = sc->sc_bustag; 491 bus_space_handle_t h = sc->sc_h; 492 int i; 493 494 495 /* 496 * Resetting while DMA is in progress can cause a bus hang, so we 497 * disable DMA first. 498 */ 499 gem_disable_rx(sc); 500 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 501 /* Wait till it finishes */ 502 for (i = TRIES; i--; delay(100)) 503 if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) == 0) 504 break; 505 if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) != 0) 506 printf("%s: cannot disable read dma\n", 507 sc->sc_dev.dv_xname); 508 509 /* Wait 5ms extra. */ 510 delay(5000); 511 512 /* Finally, reset the ERX */ 513 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); 514 /* Wait till it finishes */ 515 for (i = TRIES; i--; delay(100)) 516 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) == 0) 517 break; 518 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) != 0) { 519 printf("%s: cannot reset receiver\n", 520 sc->sc_dev.dv_xname); 521 return (1); 522 } 523 return (0); 524 } 525 526 527 /* 528 * Reset the transmitter 529 */ 530 int 531 gem_reset_tx(struct gem_softc *sc) 532 { 533 bus_space_tag_t t = sc->sc_bustag; 534 bus_space_handle_t h = sc->sc_h; 535 int i; 536 537 /* 538 * Resetting while DMA is in progress can cause a bus hang, so we 539 * disable DMA first. 540 */ 541 gem_disable_tx(sc); 542 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 543 /* Wait till it finishes */ 544 for (i = TRIES; i--; delay(100)) 545 if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) == 0) 546 break; 547 if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) != 0) 548 printf("%s: cannot disable read dma\n", 549 sc->sc_dev.dv_xname); 550 551 /* Wait 5ms extra. 
*/ 552 delay(5000); 553 554 /* Finally, reset the ETX */ 555 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); 556 /* Wait till it finishes */ 557 for (i = TRIES; i--; delay(100)) 558 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) 559 break; 560 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) != 0) { 561 printf("%s: cannot reset receiver\n", 562 sc->sc_dev.dv_xname); 563 return (1); 564 } 565 return (0); 566 } 567 568 /* 569 * disable receiver. 570 */ 571 int 572 gem_disable_rx(struct gem_softc *sc) 573 { 574 bus_space_tag_t t = sc->sc_bustag; 575 bus_space_handle_t h = sc->sc_h; 576 int i; 577 u_int32_t cfg; 578 579 /* Flip the enable bit */ 580 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 581 cfg &= ~GEM_MAC_RX_ENABLE; 582 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 583 584 /* Wait for it to finish */ 585 for (i = TRIES; i--; delay(100)) 586 if ((bus_space_read_4(t, h, GEM_MAC_RX_CONFIG) & 587 GEM_MAC_RX_ENABLE) == 0) 588 return (0); 589 return (1); 590 } 591 592 /* 593 * disable transmitter. 594 */ 595 int 596 gem_disable_tx(struct gem_softc *sc) 597 { 598 bus_space_tag_t t = sc->sc_bustag; 599 bus_space_handle_t h = sc->sc_h; 600 int i; 601 u_int32_t cfg; 602 603 /* Flip the enable bit */ 604 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 605 cfg &= ~GEM_MAC_TX_ENABLE; 606 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 607 608 /* Wait for it to finish */ 609 for (i = TRIES; i--; delay(100)) 610 if ((bus_space_read_4(t, h, GEM_MAC_TX_CONFIG) & 611 GEM_MAC_TX_ENABLE) == 0) 612 return (0); 613 return (1); 614 } 615 616 /* 617 * Initialize interface. 618 */ 619 int 620 gem_meminit(struct gem_softc *sc) 621 { 622 struct gem_rxsoft *rxs; 623 int i, error; 624 625 /* 626 * Initialize the transmit descriptor ring. 
627 */ 628 for (i = 0; i < GEM_NTXDESC; i++) { 629 sc->sc_txdescs[i].gd_flags = 0; 630 sc->sc_txdescs[i].gd_addr = 0; 631 } 632 GEM_CDTXSYNC(sc, 0, GEM_NTXDESC, 633 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 634 635 /* 636 * Initialize the receive descriptor and receive job 637 * descriptor rings. 638 */ 639 for (i = 0; i < GEM_NRXDESC; i++) { 640 rxs = &sc->sc_rxsoft[i]; 641 if (rxs->rxs_mbuf == NULL) { 642 if ((error = gem_add_rxbuf(sc, i)) != 0) { 643 printf("%s: unable to allocate or map rx " 644 "buffer %d, error = %d\n", 645 sc->sc_dev.dv_xname, i, error); 646 /* 647 * XXX Should attempt to run with fewer receive 648 * XXX buffers instead of just failing. 649 */ 650 gem_rxdrain(sc); 651 return (1); 652 } 653 } else 654 GEM_INIT_RXDESC(sc, i); 655 } 656 sc->sc_rxptr = 0; 657 658 return (0); 659 } 660 661 static int 662 gem_ringsize(int sz) 663 { 664 int v; 665 666 switch (sz) { 667 case 32: 668 v = GEM_RING_SZ_32; 669 break; 670 case 64: 671 v = GEM_RING_SZ_64; 672 break; 673 case 128: 674 v = GEM_RING_SZ_128; 675 break; 676 case 256: 677 v = GEM_RING_SZ_256; 678 break; 679 case 512: 680 v = GEM_RING_SZ_512; 681 break; 682 case 1024: 683 v = GEM_RING_SZ_1024; 684 break; 685 case 2048: 686 v = GEM_RING_SZ_2048; 687 break; 688 case 4096: 689 v = GEM_RING_SZ_4096; 690 break; 691 case 8192: 692 v = GEM_RING_SZ_8192; 693 break; 694 default: 695 v = GEM_RING_SZ_32; 696 printf("gem: invalid Receive Descriptor ring size\n"); 697 break; 698 } 699 return (v); 700 } 701 702 /* 703 * Initialization of interface; set up initialization block 704 * and transmit/receive descriptor rings. 705 */ 706 int 707 gem_init(struct ifnet *ifp) 708 { 709 710 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 711 bus_space_tag_t t = sc->sc_bustag; 712 bus_space_handle_t h = sc->sc_h; 713 int s; 714 u_int max_frame_size; 715 u_int32_t v; 716 717 s = splimp(); 718 719 DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname)); 720 /* 721 * Initialization sequence. 
	 * The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	max_frame_size = max(ifp->if_mtu, ETHERMTU);
	max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
#if 0
	if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
		max_frame_size += ETHER_VLAN_ENCAP_LEN;
#endif
	/* upper half of the register is the TX burst size */
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	      GEM_INTR_TX_EMPTY|
	      GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	      GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	      GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	      GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ( (sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);


	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

/*
 * gem_init_regs:
 *
 *	Program the MAC registers that are not cleared by a chip reset:
 *	inter-packet gaps, frame size limits, slot-time parameters, the
 *	station/control/filter addresses, and the XIF configuration.
 *	Also zeroes all the MAC statistics counters.
 */
void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t v;

	/* These regs are not cleared on reset */
	/*
	 * NOTE(review): sc_inited is set to 0 immediately before being
	 * tested, so this block always runs; the guard is effectively
	 * dead.  Presumably intentional (always reprogram after reset),
	 * but worth confirming before relying on the init-once intent.
	 */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno....
		 */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		/* seed derived from the low bits of our MAC address */
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_enaddr[5]<<8)|sc->sc_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_enaddr[4]<<8) | sc->sc_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_enaddr[2]<<8) | sc->sc_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_enaddr[0]<<8) | sc->sc_enaddr[1]);


	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

/*
 * Receive interrupt.
 *
 *	Walk the RX ring from sc_rxptr until a descriptor still owned by
 *	the hardware is found, handing each completed frame to the stack
 *	and re-arming the descriptor.  Returns 1 (interrupt handled).
 */
int
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.
Note the Gem 976 * includes the CRC with every packet. 977 */ 978 len = GEM_RD_BUFLEN(rxstat); 979 980 /* 981 * Allocate a new mbuf cluster. If that fails, we are 982 * out of memory, and must drop the packet and recycle 983 * the buffer that's already attached to this descriptor. 984 */ 985 m = rxs->rxs_mbuf; 986 if (gem_add_rxbuf(sc, i) != 0) { 987 ifp->if_ierrors++; 988 GEM_INIT_RXDESC(sc, i); 989 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 990 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 991 continue; 992 } 993 m->m_data += 2; /* We're already off by two */ 994 995 ifp->if_ipackets++; 996 eh = mtod(m, struct ether_header *); 997 m->m_pkthdr.rcvif = ifp; 998 m->m_pkthdr.len = m->m_len = len; 999 1000 #if NBPFILTER > 0 1001 /* 1002 * Pass this up to any BPF listeners, but only 1003 * pass it up the stack if its for us. 1004 */ 1005 if (ifp->if_bpf) 1006 bpf_mtap(ifp->if_bpf, m); 1007 #endif /* NPBFILTER > 0 */ 1008 1009 /* Pass it on. */ 1010 ether_input_mbuf(ifp, m); 1011 } 1012 1013 /* Update the receive pointer. */ 1014 sc->sc_rxptr = i; 1015 bus_space_write_4(t, h, GEM_RX_KICK, i); 1016 1017 DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n", 1018 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION))); 1019 1020 return (1); 1021 } 1022 1023 1024 /* 1025 * gem_add_rxbuf: 1026 * 1027 * Add a receive buffer to the indicated descriptor. 
1028 */ 1029 int 1030 gem_add_rxbuf(struct gem_softc *sc, int idx) 1031 { 1032 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1033 struct mbuf *m; 1034 int error; 1035 1036 MGETHDR(m, M_DONTWAIT, MT_DATA); 1037 if (m == NULL) 1038 return (ENOBUFS); 1039 1040 MCLGET(m, M_DONTWAIT); 1041 if ((m->m_flags & M_EXT) == 0) { 1042 m_freem(m); 1043 return (ENOBUFS); 1044 } 1045 1046 #ifdef GEM_DEBUG 1047 /* bzero the packet to check dma */ 1048 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1049 #endif 1050 1051 if (rxs->rxs_mbuf != NULL) 1052 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 1053 1054 rxs->rxs_mbuf = m; 1055 1056 error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap, 1057 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1058 BUS_DMA_READ|BUS_DMA_NOWAIT); 1059 if (error) { 1060 printf("%s: can't load rx DMA map %d, error = %d\n", 1061 sc->sc_dev.dv_xname, idx, error); 1062 panic("gem_add_rxbuf"); /* XXX */ 1063 } 1064 1065 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1066 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1067 1068 GEM_INIT_RXDESC(sc, idx); 1069 1070 return (0); 1071 } 1072 1073 1074 int 1075 gem_eint(sc, status) 1076 struct gem_softc *sc; 1077 u_int status; 1078 { 1079 if ((status & GEM_INTR_MIF) != 0) { 1080 printf("%s: link status changed\n", sc->sc_dev.dv_xname); 1081 return (1); 1082 } 1083 1084 printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS); 1085 return (1); 1086 } 1087 1088 1089 int 1090 gem_intr(v) 1091 void *v; 1092 { 1093 struct gem_softc *sc = (struct gem_softc *)v; 1094 bus_space_tag_t t = sc->sc_bustag; 1095 bus_space_handle_t seb = sc->sc_h; 1096 u_int32_t status; 1097 int r = 0; 1098 1099 status = bus_space_read_4(t, seb, GEM_STATUS); 1100 DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n", 1101 sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS)); 1102 1103 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1104 r |= gem_eint(sc, status); 1105 1106 if ((status & (GEM_INTR_TX_EMPTY | 
GEM_INTR_TX_INTME)) != 0) 1107 r |= gem_tint(sc, status); 1108 1109 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1110 r |= gem_rint(sc); 1111 1112 /* We should eventually do more than just print out error stats. */ 1113 if (status & GEM_INTR_TX_MAC) { 1114 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); 1115 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 1116 printf("%s: MAC tx fault, status %x\n", 1117 sc->sc_dev.dv_xname, txstat); 1118 } 1119 if (status & GEM_INTR_RX_MAC) { 1120 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); 1121 1122 rxstat &= ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 1123 if (rxstat & GEM_MAC_RX_OVERFLOW) { 1124 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1125 1126 gem_init(ifp); 1127 ifp->if_ierrors++; 1128 } else { 1129 /* 1130 * Leave this in here until I figure out what to do 1131 * about other errors. 1132 */ 1133 printf("%s: MAC rx fault, status %x\n", 1134 sc->sc_dev.dv_xname, rxstat); 1135 } 1136 } 1137 return (r); 1138 } 1139 1140 1141 void 1142 gem_watchdog(ifp) 1143 struct ifnet *ifp; 1144 { 1145 struct gem_softc *sc = ifp->if_softc; 1146 1147 DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1148 "GEM_MAC_RX_CONFIG %x\n", 1149 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), 1150 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), 1151 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG))); 1152 1153 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname); 1154 ++ifp->if_oerrors; 1155 1156 /* Try to get more packets going. 
*/ 1157 gem_init(ifp); 1158 } 1159 1160 /* 1161 * Initialize the MII Management Interface 1162 */ 1163 void 1164 gem_mifinit(sc) 1165 struct gem_softc *sc; 1166 { 1167 bus_space_tag_t t = sc->sc_bustag; 1168 bus_space_handle_t mif = sc->sc_h; 1169 1170 /* Configure the MIF in frame mode */ 1171 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1172 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 1173 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 1174 } 1175 1176 /* 1177 * MII interface 1178 * 1179 * The GEM MII interface supports at least three different operating modes: 1180 * 1181 * Bitbang mode is implemented using data, clock and output enable registers. 1182 * 1183 * Frame mode is implemented by loading a complete frame into the frame 1184 * register and polling the valid bit for completion. 1185 * 1186 * Polling mode uses the frame register but completion is indicated by 1187 * an interrupt. 1188 * 1189 */ 1190 static int 1191 gem_mii_readreg(self, phy, reg) 1192 struct device *self; 1193 int phy, reg; 1194 { 1195 struct gem_softc *sc = (void *)self; 1196 bus_space_tag_t t = sc->sc_bustag; 1197 bus_space_handle_t mif = sc->sc_h; 1198 int n; 1199 u_int32_t v; 1200 1201 #ifdef GEM_DEBUG1 1202 if (sc->sc_debug) 1203 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1204 #endif 1205 1206 /* Construct the frame command */ 1207 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1208 GEM_MIF_FRAME_READ; 1209 1210 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1211 for (n = 0; n < 100; n++) { 1212 DELAY(1); 1213 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1214 if (v & GEM_MIF_FRAME_TA0) 1215 return (v & GEM_MIF_FRAME_DATA); 1216 } 1217 1218 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); 1219 return (0); 1220 } 1221 1222 static void 1223 gem_mii_writereg(self, phy, reg, val) 1224 struct device *self; 1225 int phy, reg, val; 1226 { 1227 struct gem_softc *sc = (void *)self; 1228 bus_space_tag_t t = sc->sc_bustag; 1229 
bus_space_handle_t mif = sc->sc_h; 1230 int n; 1231 u_int32_t v; 1232 1233 #ifdef GEM_DEBUG1 1234 if (sc->sc_debug) 1235 printf("gem_mii_writereg: phy %d reg %d val %x\n", 1236 phy, reg, val); 1237 #endif 1238 1239 #if 0 1240 /* Select the desired PHY in the MIF configuration register */ 1241 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1242 /* Clear PHY select bit */ 1243 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1244 if (phy == GEM_PHYAD_EXTERNAL) 1245 /* Set PHY select bit to get at external device */ 1246 v |= GEM_MIF_CONFIG_PHY_SEL; 1247 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1248 #endif 1249 /* Construct the frame command */ 1250 v = GEM_MIF_FRAME_WRITE | 1251 (phy << GEM_MIF_PHY_SHIFT) | 1252 (reg << GEM_MIF_REG_SHIFT) | 1253 (val & GEM_MIF_FRAME_DATA); 1254 1255 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1256 for (n = 0; n < 100; n++) { 1257 DELAY(1); 1258 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1259 if (v & GEM_MIF_FRAME_TA0) 1260 return; 1261 } 1262 1263 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); 1264 } 1265 1266 static void 1267 gem_mii_statchg(dev) 1268 struct device *dev; 1269 { 1270 struct gem_softc *sc = (void *)dev; 1271 #ifdef GEM_DEBUG 1272 int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 1273 #endif 1274 bus_space_tag_t t = sc->sc_bustag; 1275 bus_space_handle_t mac = sc->sc_h; 1276 u_int32_t v; 1277 1278 #ifdef GEM_DEBUG 1279 if (sc->sc_debug) 1280 printf("gem_mii_statchg: status change: phy = %d\n", 1281 sc->sc_phys[instance]); 1282 #endif 1283 1284 1285 /* Set tx full duplex options */ 1286 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1287 delay(10000); /* reg must be cleared and delay before changing. 
*/ 1288 v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| 1289 GEM_MAC_TX_ENABLE; 1290 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1291 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; 1292 } 1293 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); 1294 1295 /* XIF Configuration */ 1296 /* We should really calculate all this rather than rely on defaults */ 1297 v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); 1298 v = GEM_MAC_XIF_LINK_LED; 1299 v |= GEM_MAC_XIF_TX_MII_ENA; 1300 /* If an external transceiver is connected, enable its MII drivers */ 1301 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 1302 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 1303 /* External MII needs echo disable if half duplex. */ 1304 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 1305 /* turn on full duplex LED */ 1306 v |= GEM_MAC_XIF_FDPLX_LED; 1307 else 1308 /* half duplex -- disable echo */ 1309 v |= GEM_MAC_XIF_ECHO_DISABL; 1310 1311 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 1312 case IFM_1000_T: /* Gigabit using GMII interface */ 1313 v |= GEM_MAC_XIF_GMII_MODE; 1314 break; 1315 default: 1316 v &= ~GEM_MAC_XIF_GMII_MODE; 1317 } 1318 } else 1319 /* Internal MII needs buf enable */ 1320 v |= GEM_MAC_XIF_MII_BUF_ENA; 1321 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 1322 } 1323 1324 int 1325 gem_mediachange(ifp) 1326 struct ifnet *ifp; 1327 { 1328 struct gem_softc *sc = ifp->if_softc; 1329 struct mii_data *mii = &sc->sc_mii; 1330 1331 if (mii->mii_instance) { 1332 struct mii_softc *miisc; 1333 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1334 miisc = LIST_NEXT(miisc, mii_list)) 1335 mii_phy_reset(miisc); 1336 } 1337 1338 return (mii_mediachg(&sc->sc_mii)); 1339 } 1340 1341 void 1342 gem_mediastatus(ifp, ifmr) 1343 struct ifnet *ifp; 1344 struct ifmediareq *ifmr; 1345 { 1346 struct gem_softc *sc = ifp->if_softc; 1347 1348 mii_pollstat(&sc->sc_mii); 1349 ifmr->ifm_active = 
sc->sc_mii.mii_media_active; 1350 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1351 } 1352 1353 /* 1354 * Process an ioctl request. 1355 */ 1356 int 1357 gem_ioctl(ifp, cmd, data) 1358 struct ifnet *ifp; 1359 u_long cmd; 1360 caddr_t data; 1361 { 1362 struct gem_softc *sc = ifp->if_softc; 1363 struct ifaddr *ifa = (struct ifaddr *)data; 1364 struct ifreq *ifr = (struct ifreq *)data; 1365 int s, error = 0; 1366 1367 s = splimp(); 1368 1369 if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) { 1370 splx(s); 1371 return (error); 1372 } 1373 1374 switch (cmd) { 1375 1376 case SIOCSIFADDR: 1377 ifp->if_flags |= IFF_UP; 1378 1379 switch (ifa->ifa_addr->sa_family) { 1380 #ifdef INET 1381 case AF_INET: 1382 gem_init(ifp); 1383 arp_ifinit(&sc->sc_arpcom, ifa); 1384 break; 1385 #endif 1386 #ifdef NS 1387 case AF_NS: 1388 { 1389 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 1390 1391 if (ns_nullhost(*ina)) 1392 ina->x_host = 1393 *(union ns_host *)LLADDR(ifp->if_sadl); 1394 else { 1395 memcpy(LLADDR(ifp->if_sadl), 1396 ina->x_host.c_host, sizeof(sc->sc_enaddr)); 1397 } 1398 /* Set new address. */ 1399 gem_init(ifp); 1400 break; 1401 } 1402 #endif 1403 default: 1404 gem_init(ifp); 1405 break; 1406 } 1407 break; 1408 1409 case SIOCSIFFLAGS: 1410 if ((ifp->if_flags & IFF_UP) == 0 && 1411 (ifp->if_flags & IFF_RUNNING) != 0) { 1412 /* 1413 * If interface is marked down and it is running, then 1414 * stop it. 1415 */ 1416 gem_stop(ifp, 1); 1417 ifp->if_flags &= ~IFF_RUNNING; 1418 } else if ((ifp->if_flags & IFF_UP) != 0 && 1419 (ifp->if_flags & IFF_RUNNING) == 0) { 1420 /* 1421 * If interface is marked up and it is stopped, then 1422 * start it. 1423 */ 1424 gem_init(ifp); 1425 } else if ((ifp->if_flags & IFF_UP) != 0) { 1426 /* 1427 * Reset the interface to pick up changes in any other 1428 * flags that affect hardware registers. 1429 */ 1430 /*gem_stop(sc);*/ 1431 gem_init(ifp); 1432 } 1433 #ifdef HMEDEBUG 1434 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 
1 : 0; 1435 #endif 1436 break; 1437 1438 case SIOCADDMULTI: 1439 case SIOCDELMULTI: 1440 error = (cmd == SIOCADDMULTI) ? 1441 ether_addmulti(ifr, &sc->sc_arpcom) : 1442 ether_delmulti(ifr, &sc->sc_arpcom); 1443 1444 if (error == ENETRESET) { 1445 /* 1446 * Multicast list has changed; set the hardware filter 1447 * accordingly. 1448 */ 1449 gem_init(ifp); 1450 error = 0; 1451 } 1452 break; 1453 1454 case SIOCGIFMEDIA: 1455 case SIOCSIFMEDIA: 1456 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 1457 break; 1458 1459 default: 1460 error = EINVAL; 1461 break; 1462 } 1463 1464 splx(s); 1465 return (error); 1466 } 1467 1468 1469 void 1470 gem_shutdown(arg) 1471 void *arg; 1472 { 1473 struct gem_softc *sc = (struct gem_softc *)arg; 1474 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1475 1476 gem_stop(ifp, 1); 1477 } 1478 1479 /* 1480 * Set up the logical address filter. 1481 */ 1482 void 1483 gem_setladrf(sc) 1484 struct gem_softc *sc; 1485 { 1486 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1487 struct ether_multi *enm; 1488 struct ether_multistep step; 1489 struct arpcom *ac = &sc->sc_arpcom; 1490 bus_space_tag_t t = sc->sc_bustag; 1491 bus_space_handle_t h = sc->sc_h; 1492 u_int32_t crc, hash[16], v; 1493 int i; 1494 1495 /* Get current RX configuration */ 1496 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1497 1498 1499 /* 1500 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1501 * and hash filter. Depending on the case, the right bit will be 1502 * enabled. 1503 */ 1504 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1505 GEM_MAC_RX_PROMISC_GRP); 1506 1507 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1508 /* Turn on promiscuous mode */ 1509 v |= GEM_MAC_RX_PROMISCUOUS; 1510 ifp->if_flags |= IFF_ALLMULTI; 1511 goto chipit; 1512 } 1513 1514 /* 1515 * Set up multicast address filter by passing all multicast addresses 1516 * through a crc generator, and then using the high order 8 bits as an 1517 * index into the 256 bit logical address filter. 
The high order 4 1518 * bits select the word, while the other 4 bits select the bit within 1519 * the word (where bit 0 is the MSB). 1520 */ 1521 1522 /* Clear hash table */ 1523 for (i = 0; i < 16; i++) 1524 hash[i] = 0; 1525 1526 1527 ETHER_FIRST_MULTI(step, ac, enm); 1528 while (enm != NULL) { 1529 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1530 /* 1531 * We must listen to a range of multicast addresses. 1532 * For now, just accept all multicasts, rather than 1533 * trying to set only those filter bits needed to match 1534 * the range. (At this time, the only use of address 1535 * ranges is for IP multicast routing, for which the 1536 * range is big enough to require all bits set.) 1537 * XXX use the addr filter for this 1538 */ 1539 ifp->if_flags |= IFF_ALLMULTI; 1540 v |= GEM_MAC_RX_PROMISC_GRP; 1541 goto chipit; 1542 } 1543 1544 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1545 1546 /* Just want the 8 most significant bits. */ 1547 crc >>= 24; 1548 1549 /* Set the corresponding bit in the filter. 
*/ 1550 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1551 1552 ETHER_NEXT_MULTI(step, enm); 1553 } 1554 1555 v |= GEM_MAC_RX_HASH_FILTER; 1556 ifp->if_flags &= ~IFF_ALLMULTI; 1557 1558 /* Now load the hash table into the chip (if we are using it) */ 1559 for (i = 0; i < 16; i++) { 1560 bus_space_write_4(t, h, 1561 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1562 hash[i]); 1563 } 1564 1565 chipit: 1566 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1567 } 1568 1569 int 1570 gem_encap(sc, mhead, bixp) 1571 struct gem_softc *sc; 1572 struct mbuf *mhead; 1573 u_int32_t *bixp; 1574 { 1575 u_int64_t flags; 1576 u_int32_t cur, frag, i; 1577 bus_dmamap_t map; 1578 1579 cur = frag = *bixp; 1580 1581 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, GEM_NTXDESC, 1582 MCLBYTES, 0, BUS_DMA_NOWAIT, &map) != 0) { 1583 return (ENOBUFS); 1584 } 1585 1586 if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead, 1587 BUS_DMA_NOWAIT) != 0) { 1588 bus_dmamap_destroy(sc->sc_dmatag, map); 1589 return (ENOBUFS); 1590 } 1591 1592 if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) { 1593 bus_dmamap_unload(sc->sc_dmatag, map); 1594 bus_dmamap_destroy(sc->sc_dmatag, map); 1595 return (ENOBUFS); 1596 } 1597 1598 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 1599 BUS_DMASYNC_PREWRITE); 1600 1601 for (i = 0; i < map->dm_nsegs; i++) { 1602 sc->sc_txdescs[frag].gd_addr = 1603 GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr); 1604 flags = (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE) | 1605 (i == 0 ? GEM_TD_START_OF_PACKET : 0) | 1606 ((i == (map->dm_nsegs - 1)) ? 
GEM_TD_END_OF_PACKET : 0); 1607 sc->sc_txdescs[frag].gd_flags = GEM_DMA_WRITE(sc, flags); 1608 bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap, 1609 GEM_CDTXOFF(frag), sizeof(struct gem_desc), 1610 BUS_DMASYNC_PREWRITE); 1611 cur = frag; 1612 if (++frag == GEM_NTXDESC) 1613 frag = 0; 1614 } 1615 1616 sc->sc_tx_cnt += map->dm_nsegs; 1617 sc->sc_txd[cur].sd_map = map; 1618 sc->sc_txd[cur].sd_mbuf = mhead; 1619 1620 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, frag); 1621 1622 *bixp = frag; 1623 1624 /* sync descriptors */ 1625 1626 return (0); 1627 } 1628 1629 /* 1630 * Transmit interrupt. 1631 */ 1632 int 1633 gem_tint(sc, status) 1634 struct gem_softc *sc; 1635 u_int32_t status; 1636 { 1637 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1638 struct gem_sxd *sd; 1639 u_int32_t cons, hwcons; 1640 1641 hwcons = status >> 19; 1642 cons = sc->sc_tx_cons; 1643 while (cons != hwcons) { 1644 sd = &sc->sc_txd[cons]; 1645 if (sd->sd_map != NULL) { 1646 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 1647 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1648 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 1649 bus_dmamap_destroy(sc->sc_dmatag, sd->sd_map); 1650 sd->sd_map = NULL; 1651 } 1652 if (sd->sd_mbuf != NULL) { 1653 m_freem(sd->sd_mbuf); 1654 sd->sd_mbuf = NULL; 1655 } 1656 sc->sc_tx_cnt--; 1657 ifp->if_opackets++; 1658 if (++cons == GEM_NTXDESC) 1659 cons = 0; 1660 } 1661 sc->sc_tx_cons = cons; 1662 1663 gem_start(ifp); 1664 1665 if (sc->sc_tx_cnt == 0) 1666 ifp->if_timer = 0; 1667 1668 return (1); 1669 } 1670 1671 void 1672 gem_start(ifp) 1673 struct ifnet *ifp; 1674 { 1675 struct gem_softc *sc = ifp->if_softc; 1676 struct mbuf *m; 1677 u_int32_t bix; 1678 1679 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1680 return; 1681 1682 bix = sc->sc_tx_prod; 1683 while (sc->sc_txd[bix].sd_mbuf == NULL) { 1684 IFQ_POLL(&ifp->if_snd, m); 1685 if (m == NULL) 1686 break; 1687 1688 #if NBPFILTER > 0 1689 /* 1690 * If BPF is listening on this interface, let it 
see the 1691 * packet before we commit it to the wire. 1692 */ 1693 if (ifp->if_bpf) 1694 bpf_mtap(ifp->if_bpf, m); 1695 #endif 1696 1697 /* 1698 * Encapsulate this packet and start it going... 1699 * or fail... 1700 */ 1701 if (gem_encap(sc, m, &bix)) { 1702 ifp->if_timer = 2; 1703 break; 1704 } 1705 1706 IFQ_DEQUEUE(&ifp->if_snd, m); 1707 ifp->if_timer = 5; 1708 } 1709 1710 sc->sc_tx_prod = bix; 1711 } 1712