/*	$OpenBSD: gem.c,v 1.90 2009/03/29 11:53:47 kettenis Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
35 */ 36 37 #include "bpfilter.h" 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/timeout.h> 42 #include <sys/mbuf.h> 43 #include <sys/syslog.h> 44 #include <sys/malloc.h> 45 #include <sys/kernel.h> 46 #include <sys/socket.h> 47 #include <sys/ioctl.h> 48 #include <sys/errno.h> 49 #include <sys/device.h> 50 51 #include <machine/endian.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 57 #ifdef INET 58 #include <netinet/in.h> 59 #include <netinet/if_ether.h> 60 #endif 61 62 #if NBPFILTER > 0 63 #include <net/bpf.h> 64 #endif 65 66 #include <machine/bus.h> 67 #include <machine/intr.h> 68 69 #include <dev/mii/mii.h> 70 #include <dev/mii/miivar.h> 71 #include <dev/mii/mii_bitbang.h> 72 73 #include <dev/ic/gemreg.h> 74 #include <dev/ic/gemvar.h> 75 76 #define TRIES 10000 77 78 struct cfdriver gem_cd = { 79 NULL, "gem", DV_IFNET 80 }; 81 82 void gem_start(struct ifnet *); 83 void gem_stop(struct ifnet *); 84 int gem_ioctl(struct ifnet *, u_long, caddr_t); 85 void gem_tick(void *); 86 void gem_watchdog(struct ifnet *); 87 void gem_shutdown(void *); 88 int gem_init(struct ifnet *); 89 void gem_init_regs(struct gem_softc *); 90 int gem_ringsize(int); 91 int gem_meminit(struct gem_softc *); 92 void gem_mifinit(struct gem_softc *); 93 int gem_bitwait(struct gem_softc *, bus_space_handle_t, int, 94 u_int32_t, u_int32_t); 95 void gem_reset(struct gem_softc *); 96 int gem_reset_rx(struct gem_softc *); 97 int gem_reset_tx(struct gem_softc *); 98 int gem_disable_rx(struct gem_softc *); 99 int gem_disable_tx(struct gem_softc *); 100 void gem_rx_watchdog(void *); 101 void gem_rxdrain(struct gem_softc *); 102 void gem_fill_rx_ring(struct gem_softc *); 103 int gem_add_rxbuf(struct gem_softc *, int idx); 104 void gem_setladrf(struct gem_softc *); 105 106 /* MII methods & callbacks */ 107 int gem_mii_readreg(struct device *, int, int); 108 void gem_mii_writereg(struct device *, int, int, int); 109 void gem_mii_statchg(struct 
device *); 110 int gem_pcs_readreg(struct device *, int, int); 111 void gem_pcs_writereg(struct device *, int, int, int); 112 113 int gem_mediachange(struct ifnet *); 114 void gem_mediastatus(struct ifnet *, struct ifmediareq *); 115 116 int gem_eint(struct gem_softc *, u_int); 117 int gem_rint(struct gem_softc *); 118 int gem_tint(struct gem_softc *, u_int32_t); 119 int gem_pint(struct gem_softc *); 120 121 #ifdef GEM_DEBUG 122 #define DPRINTF(sc, x) if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \ 123 printf x 124 #else 125 #define DPRINTF(sc, x) /* nothing */ 126 #endif 127 128 /* 129 * Attach a Gem interface to the system. 130 */ 131 void 132 gem_config(struct gem_softc *sc) 133 { 134 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 135 struct mii_data *mii = &sc->sc_mii; 136 struct mii_softc *child; 137 int i, error, phyad; 138 struct ifmedia_entry *ifm; 139 140 /* Make sure the chip is stopped. */ 141 ifp->if_softc = sc; 142 gem_reset(sc); 143 144 /* 145 * Allocate the control data structures, and create and load the 146 * DMA map for it. 
147 */ 148 if ((error = bus_dmamem_alloc(sc->sc_dmatag, 149 sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg, 150 1, &sc->sc_cdnseg, 0)) != 0) { 151 printf("\n%s: unable to allocate control data, error = %d\n", 152 sc->sc_dev.dv_xname, error); 153 goto fail_0; 154 } 155 156 /* XXX should map this in with correct endianness */ 157 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg, 158 sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data, 159 BUS_DMA_COHERENT)) != 0) { 160 printf("\n%s: unable to map control data, error = %d\n", 161 sc->sc_dev.dv_xname, error); 162 goto fail_1; 163 } 164 165 if ((error = bus_dmamap_create(sc->sc_dmatag, 166 sizeof(struct gem_control_data), 1, 167 sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 168 printf("\n%s: unable to create control data DMA map, " 169 "error = %d\n", sc->sc_dev.dv_xname, error); 170 goto fail_2; 171 } 172 173 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap, 174 sc->sc_control_data, sizeof(struct gem_control_data), NULL, 175 0)) != 0) { 176 printf("\n%s: unable to load control data DMA map, error = %d\n", 177 sc->sc_dev.dv_xname, error); 178 goto fail_3; 179 } 180 181 /* 182 * Create the receive buffer DMA maps. 183 */ 184 for (i = 0; i < GEM_NRXDESC; i++) { 185 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, 186 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 187 printf("\n%s: unable to create rx DMA map %d, " 188 "error = %d\n", sc->sc_dev.dv_xname, i, error); 189 goto fail_5; 190 } 191 sc->sc_rxsoft[i].rxs_mbuf = NULL; 192 } 193 /* 194 * Create the transmit buffer DMA maps. 
195 */ 196 for (i = 0; i < GEM_NTXDESC; i++) { 197 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 198 GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 199 &sc->sc_txd[i].sd_map)) != 0) { 200 printf("\n%s: unable to create tx DMA map %d, " 201 "error = %d\n", sc->sc_dev.dv_xname, i, error); 202 goto fail_6; 203 } 204 sc->sc_txd[i].sd_mbuf = NULL; 205 } 206 207 /* 208 * From this point forward, the attachment cannot fail. A failure 209 * before this point releases all resources that may have been 210 * allocated. 211 */ 212 213 /* Announce ourselves. */ 214 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 215 216 /* Get RX FIFO size */ 217 sc->sc_rxfifosize = 64 * 218 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE); 219 220 /* Initialize ifnet structure. */ 221 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname); 222 ifp->if_softc = sc; 223 ifp->if_flags = 224 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 225 ifp->if_start = gem_start; 226 ifp->if_ioctl = gem_ioctl; 227 ifp->if_watchdog = gem_watchdog; 228 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1); 229 IFQ_SET_READY(&ifp->if_snd); 230 231 /* Hardware reads RX descriptors in multiples of four. */ 232 m_clsetwms(ifp, MCLBYTES, 4, GEM_NRXDESC - 4); 233 234 ifp->if_capabilities = IFCAP_VLAN_MTU; 235 236 /* Initialize ifmedia structures and MII info */ 237 mii->mii_ifp = ifp; 238 mii->mii_readreg = gem_mii_readreg; 239 mii->mii_writereg = gem_mii_writereg; 240 mii->mii_statchg = gem_mii_statchg; 241 242 ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus); 243 244 /* Bad things will happen if we touch this register on ERI. */ 245 if (sc->sc_variant != GEM_SUN_ERI) 246 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 247 GEM_MII_DATAPATH_MODE, 0); 248 249 gem_mifinit(sc); 250 251 /* 252 * Look for an external PHY. 
253 */ 254 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 255 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 256 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 257 GEM_MIF_CONFIG, sc->sc_mif_config); 258 259 switch (sc->sc_variant) { 260 case GEM_SUN_ERI: 261 phyad = GEM_PHYAD_EXTERNAL; 262 break; 263 default: 264 phyad = MII_PHY_ANY; 265 break; 266 } 267 268 mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad, 269 MII_OFFSET_ANY, 0); 270 } 271 272 /* 273 * Fall back on an internal PHY if no external PHY was found. 274 */ 275 child = LIST_FIRST(&mii->mii_phys); 276 if (child == NULL && sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) { 277 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 278 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 279 GEM_MIF_CONFIG, sc->sc_mif_config); 280 281 switch (sc->sc_variant) { 282 case GEM_SUN_ERI: 283 case GEM_APPLE_K2_GMAC: 284 phyad = GEM_PHYAD_INTERNAL; 285 break; 286 case GEM_APPLE_GMAC: 287 phyad = GEM_PHYAD_EXTERNAL; 288 break; 289 default: 290 phyad = MII_PHY_ANY; 291 break; 292 } 293 294 mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad, 295 MII_OFFSET_ANY, 0); 296 } 297 298 /* 299 * Try the external PCS SERDES if we didn't find any MII 300 * devices. 
301 */ 302 child = LIST_FIRST(&mii->mii_phys); 303 if (child == NULL && sc->sc_variant != GEM_SUN_ERI) { 304 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 305 GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES); 306 307 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 308 GEM_MII_SLINK_CONTROL, 309 GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D); 310 311 bus_space_write_4(sc->sc_bustag, sc->sc_h1, 312 GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 313 314 mii->mii_readreg = gem_pcs_readreg; 315 mii->mii_writereg = gem_pcs_writereg; 316 317 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 318 MII_OFFSET_ANY, MIIF_NOISOLATE); 319 } 320 321 child = LIST_FIRST(&mii->mii_phys); 322 if (child == NULL) { 323 /* No PHY attached */ 324 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 325 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); 326 } else { 327 /* 328 * XXX - we can really do the following ONLY if the 329 * phy indeed has the auto negotiation capability!! 330 */ 331 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO); 332 } 333 334 /* Check if we support GigE media. */ 335 TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) { 336 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T || 337 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX || 338 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX || 339 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) { 340 sc->sc_flags |= GEM_GIGABIT; 341 break; 342 } 343 } 344 345 /* Attach the interface. */ 346 if_attach(ifp); 347 ether_ifattach(ifp); 348 349 sc->sc_sh = shutdownhook_establish(gem_shutdown, sc); 350 if (sc->sc_sh == NULL) 351 panic("gem_config: can't establish shutdownhook"); 352 353 timeout_set(&sc->sc_tick_ch, gem_tick, sc); 354 timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc); 355 return; 356 357 /* 358 * Free any resources we've allocated during the failed attach 359 * attempt. Do this in reverse order and fall through. 
360 */ 361 fail_6: 362 for (i = 0; i < GEM_NTXDESC; i++) { 363 if (sc->sc_txd[i].sd_map != NULL) 364 bus_dmamap_destroy(sc->sc_dmatag, 365 sc->sc_txd[i].sd_map); 366 } 367 fail_5: 368 for (i = 0; i < GEM_NRXDESC; i++) { 369 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 370 bus_dmamap_destroy(sc->sc_dmatag, 371 sc->sc_rxsoft[i].rxs_dmamap); 372 } 373 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); 374 fail_3: 375 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap); 376 fail_2: 377 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data, 378 sizeof(struct gem_control_data)); 379 fail_1: 380 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg); 381 fail_0: 382 return; 383 } 384 385 386 void 387 gem_tick(void *arg) 388 { 389 struct gem_softc *sc = arg; 390 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 391 bus_space_tag_t t = sc->sc_bustag; 392 bus_space_handle_t mac = sc->sc_h1; 393 int s; 394 u_int32_t v; 395 396 /* unload collisions counters */ 397 v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 398 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 399 ifp->if_collisions += v + 400 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 401 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT); 402 ifp->if_oerrors += v; 403 404 /* read error counters */ 405 ifp->if_ierrors += 406 bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) + 407 bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) + 408 bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) + 409 bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL); 410 411 /* clear the hardware counters */ 412 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 413 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 414 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 415 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 416 bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0); 417 bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0); 418 bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0); 419 bus_space_write_4(t, mac, 
GEM_MAC_RX_CODE_VIOL, 0); 420 421 s = splnet(); 422 mii_tick(&sc->sc_mii); 423 splx(s); 424 425 timeout_add_sec(&sc->sc_tick_ch, 1); 426 } 427 428 int 429 gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, 430 u_int32_t clr, u_int32_t set) 431 { 432 int i; 433 u_int32_t reg; 434 435 for (i = TRIES; i--; DELAY(100)) { 436 reg = bus_space_read_4(sc->sc_bustag, h, r); 437 if ((reg & clr) == 0 && (reg & set) == set) 438 return (1); 439 } 440 441 return (0); 442 } 443 444 void 445 gem_reset(struct gem_softc *sc) 446 { 447 bus_space_tag_t t = sc->sc_bustag; 448 bus_space_handle_t h = sc->sc_h2; 449 int s; 450 451 s = splnet(); 452 DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname)); 453 gem_reset_rx(sc); 454 gem_reset_tx(sc); 455 456 /* Do a full reset */ 457 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX); 458 if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 459 printf("%s: cannot reset device\n", sc->sc_dev.dv_xname); 460 splx(s); 461 } 462 463 464 /* 465 * Drain the receive queue. 466 */ 467 void 468 gem_rxdrain(struct gem_softc *sc) 469 { 470 struct gem_rxsoft *rxs; 471 int i; 472 473 for (i = 0; i < GEM_NRXDESC; i++) { 474 rxs = &sc->sc_rxsoft[i]; 475 if (rxs->rxs_mbuf != NULL) { 476 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 477 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 478 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 479 m_freem(rxs->rxs_mbuf); 480 rxs->rxs_mbuf = NULL; 481 } 482 } 483 sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0; 484 } 485 486 /* 487 * Reset the whole thing. 488 */ 489 void 490 gem_stop(struct ifnet *ifp) 491 { 492 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 493 struct gem_sxd *sd; 494 u_int32_t i; 495 496 DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname)); 497 498 timeout_del(&sc->sc_tick_ch); 499 500 /* 501 * Mark the interface down and cancel the watchdog timer. 
502 */ 503 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 504 ifp->if_timer = 0; 505 506 mii_down(&sc->sc_mii); 507 508 gem_reset_rx(sc); 509 gem_reset_tx(sc); 510 511 /* 512 * Release any queued transmit buffers. 513 */ 514 for (i = 0; i < GEM_NTXDESC; i++) { 515 sd = &sc->sc_txd[i]; 516 if (sd->sd_mbuf != NULL) { 517 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 518 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 519 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 520 m_freem(sd->sd_mbuf); 521 sd->sd_mbuf = NULL; 522 } 523 } 524 sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0; 525 526 gem_rxdrain(sc); 527 } 528 529 530 /* 531 * Reset the receiver 532 */ 533 int 534 gem_reset_rx(struct gem_softc *sc) 535 { 536 bus_space_tag_t t = sc->sc_bustag; 537 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 538 539 /* 540 * Resetting while DMA is in progress can cause a bus hang, so we 541 * disable DMA first. 542 */ 543 gem_disable_rx(sc); 544 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 545 /* Wait till it finishes */ 546 if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0)) 547 printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname); 548 /* Wait 5ms extra. */ 549 delay(5000); 550 551 /* Finally, reset the ERX */ 552 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX); 553 /* Wait till it finishes */ 554 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) { 555 printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname); 556 return (1); 557 } 558 return (0); 559 } 560 561 562 /* 563 * Reset the transmitter 564 */ 565 int 566 gem_reset_tx(struct gem_softc *sc) 567 { 568 bus_space_tag_t t = sc->sc_bustag; 569 bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 570 571 /* 572 * Resetting while DMA is in progress can cause a bus hang, so we 573 * disable DMA first. 
574 */ 575 gem_disable_tx(sc); 576 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 577 /* Wait till it finishes */ 578 if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0)) 579 printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname); 580 /* Wait 5ms extra. */ 581 delay(5000); 582 583 /* Finally, reset the ETX */ 584 bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX); 585 /* Wait till it finishes */ 586 if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) { 587 printf("%s: cannot reset transmitter\n", 588 sc->sc_dev.dv_xname); 589 return (1); 590 } 591 return (0); 592 } 593 594 /* 595 * Disable receiver. 596 */ 597 int 598 gem_disable_rx(struct gem_softc *sc) 599 { 600 bus_space_tag_t t = sc->sc_bustag; 601 bus_space_handle_t h = sc->sc_h1; 602 u_int32_t cfg; 603 604 /* Flip the enable bit */ 605 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 606 cfg &= ~GEM_MAC_RX_ENABLE; 607 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 608 609 /* Wait for it to finish */ 610 return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 611 } 612 613 /* 614 * Disable transmitter. 615 */ 616 int 617 gem_disable_tx(struct gem_softc *sc) 618 { 619 bus_space_tag_t t = sc->sc_bustag; 620 bus_space_handle_t h = sc->sc_h1; 621 u_int32_t cfg; 622 623 /* Flip the enable bit */ 624 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 625 cfg &= ~GEM_MAC_TX_ENABLE; 626 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 627 628 /* Wait for it to finish */ 629 return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 630 } 631 632 /* 633 * Initialize interface. 634 */ 635 int 636 gem_meminit(struct gem_softc *sc) 637 { 638 int i; 639 640 /* 641 * Initialize the transmit descriptor ring. 642 */ 643 for (i = 0; i < GEM_NTXDESC; i++) { 644 sc->sc_txdescs[i].gd_flags = 0; 645 sc->sc_txdescs[i].gd_addr = 0; 646 } 647 GEM_CDTXSYNC(sc, 0, GEM_NTXDESC, 648 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 649 650 /* 651 * Initialize the receive descriptor and receive job 652 * descriptor rings. 
653 */ 654 for (i = 0; i < GEM_NRXDESC; i++) { 655 sc->sc_rxdescs[i].gd_flags = 0; 656 sc->sc_rxdescs[i].gd_addr = 0; 657 } 658 gem_fill_rx_ring(sc); 659 660 return (0); 661 } 662 663 int 664 gem_ringsize(int sz) 665 { 666 switch (sz) { 667 case 32: 668 return GEM_RING_SZ_32; 669 case 64: 670 return GEM_RING_SZ_64; 671 case 128: 672 return GEM_RING_SZ_128; 673 case 256: 674 return GEM_RING_SZ_256; 675 case 512: 676 return GEM_RING_SZ_512; 677 case 1024: 678 return GEM_RING_SZ_1024; 679 case 2048: 680 return GEM_RING_SZ_2048; 681 case 4096: 682 return GEM_RING_SZ_4096; 683 case 8192: 684 return GEM_RING_SZ_8192; 685 default: 686 printf("gem: invalid Receive Descriptor ring size %d\n", sz); 687 return GEM_RING_SZ_32; 688 } 689 } 690 691 /* 692 * Initialization of interface; set up initialization block 693 * and transmit/receive descriptor rings. 694 */ 695 int 696 gem_init(struct ifnet *ifp) 697 { 698 699 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 700 bus_space_tag_t t = sc->sc_bustag; 701 bus_space_handle_t h = sc->sc_h1; 702 int s; 703 u_int max_frame_size; 704 u_int32_t v; 705 706 s = splnet(); 707 708 DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname)); 709 /* 710 * Initialization sequence. The numbered steps below correspond 711 * to the sequence outlined in section 6.3.5.1 in the Ethernet 712 * Channel Engine manual (part of the PCIO manual). 713 * See also the STP2002-STQ document from Sun Microsystems. 714 */ 715 716 /* step 1 & 2. Reset the Ethernet Channel */ 717 gem_stop(ifp); 718 gem_reset(sc); 719 DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname)); 720 721 /* Re-initialize the MIF */ 722 gem_mifinit(sc); 723 724 /* Call MI reset function if any */ 725 if (sc->sc_hwreset) 726 (*sc->sc_hwreset)(sc); 727 728 /* step 3. Setup data structures in host memory */ 729 gem_meminit(sc); 730 731 /* step 4. 
TX MAC registers & counters */ 732 gem_init_regs(sc); 733 max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN; 734 v = (max_frame_size) | (0x2000 << 16) /* Burst size */; 735 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v); 736 737 /* step 5. RX MAC registers & counters */ 738 gem_setladrf(sc); 739 740 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 741 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 742 (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32)); 743 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 744 745 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 746 (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32)); 747 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 748 749 /* step 8. Global Configuration & Interrupt Mask */ 750 bus_space_write_4(t, h, GEM_INTMASK, 751 ~(GEM_INTR_TX_INTME| 752 GEM_INTR_TX_EMPTY| 753 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| 754 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| 755 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| 756 GEM_INTR_BERR)); 757 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 758 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); 759 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ 760 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ 761 762 /* step 9. ETX Configuration: use mostly default values */ 763 764 /* Enable DMA */ 765 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 766 v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) & 767 GEM_TX_CONFIG_TXFIFO_TH; 768 bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); 769 bus_space_write_4(t, h, GEM_TX_KICK, 0); 770 771 /* step 10. 
ERX Configuration */ 772 773 /* Encode Receive Descriptor ring size: four possible values */ 774 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 775 776 /* Enable DMA */ 777 bus_space_write_4(t, h, GEM_RX_CONFIG, 778 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 779 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| 780 (0<<GEM_RX_CONFIG_CXM_START_SHFT)); 781 /* 782 * The following value is for an OFF Threshold of about 3/4 full 783 * and an ON Threshold of 1/4 full. 784 */ 785 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 786 (3 * sc->sc_rxfifosize / 256) | 787 ( (sc->sc_rxfifosize / 256) << 12)); 788 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); 789 790 /* step 11. Configure Media */ 791 mii_mediachg(&sc->sc_mii); 792 793 /* step 12. RX_MAC Configuration Register */ 794 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 795 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC; 796 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 797 798 /* step 14. Issue Transmit Pending command */ 799 800 /* Call MI initialization function if any */ 801 if (sc->sc_hwinit) 802 (*sc->sc_hwinit)(sc); 803 804 /* step 15. Give the receiver a swift kick */ 805 bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod); 806 807 /* Start the one second timer. */ 808 timeout_add_sec(&sc->sc_tick_ch, 1); 809 810 ifp->if_flags |= IFF_RUNNING; 811 ifp->if_flags &= ~IFF_OACTIVE; 812 ifp->if_timer = 0; 813 splx(s); 814 815 return (0); 816 } 817 818 void 819 gem_init_regs(struct gem_softc *sc) 820 { 821 bus_space_tag_t t = sc->sc_bustag; 822 bus_space_handle_t h = sc->sc_h1; 823 u_int32_t v; 824 825 /* These regs are not cleared on reset */ 826 sc->sc_inited = 0; 827 if (!sc->sc_inited) { 828 829 /* Wooo. Magic values. 
*/ 830 bus_space_write_4(t, h, GEM_MAC_IPG0, 0); 831 bus_space_write_4(t, h, GEM_MAC_IPG1, 8); 832 bus_space_write_4(t, h, GEM_MAC_IPG2, 4); 833 834 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 835 /* Max frame and max burst size */ 836 v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */; 837 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v); 838 839 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); 840 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); 841 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 842 /* Dunno.... */ 843 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 844 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 845 ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff); 846 847 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 848 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 849 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 850 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 851 /* MAC control addr set to 0:1:c2:0:1:80 */ 852 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 853 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 854 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 855 856 /* MAC filter addr set to 0:0:0:0:0:0 */ 857 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 858 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 859 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 860 861 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 862 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 863 864 sc->sc_inited = 1; 865 } 866 867 /* Counters need to be zeroed */ 868 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 869 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 870 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 871 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 872 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 873 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 874 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 875 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 876 
bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 877 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 878 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 879 880 /* Un-pause stuff */ 881 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); 882 883 /* 884 * Set the internal arbitration to "infinite" bursts of the 885 * maximum length of 31 * 64 bytes so DMA transfers aren't 886 * split up in cache line size chunks. This greatly improves 887 * especially RX performance. 888 * Enable silicon bug workarounds for the Apple variants. 889 */ 890 v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT; 891 if (sc->sc_pci) 892 v |= GEM_CONFIG_BURST_INF; 893 else 894 v |= GEM_CONFIG_BURST_64; 895 if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI) 896 v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX; 897 bus_space_write_4(t, h, GEM_CONFIG, v); 898 899 /* 900 * Set the station address. 901 */ 902 bus_space_write_4(t, h, GEM_MAC_ADDR0, 903 (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]); 904 bus_space_write_4(t, h, GEM_MAC_ADDR1, 905 (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]); 906 bus_space_write_4(t, h, GEM_MAC_ADDR2, 907 (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]); 908 } 909 910 /* 911 * Receive interrupt. 912 */ 913 int 914 gem_rint(struct gem_softc *sc) 915 { 916 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 917 bus_space_tag_t t = sc->sc_bustag; 918 bus_space_handle_t h = sc->sc_h1; 919 struct gem_rxsoft *rxs; 920 struct mbuf *m; 921 u_int64_t rxstat; 922 int i, len; 923 924 for (i = sc->sc_rx_cons; sc->sc_rx_cnt > 0; i = GEM_NEXTRX(i)) { 925 rxs = &sc->sc_rxsoft[i]; 926 927 GEM_CDRXSYNC(sc, i, 928 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 929 930 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); 931 932 if (rxstat & GEM_RD_OWN) { 933 /* We have processed all of the receive buffers. 
*/ 934 break; 935 } 936 937 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 938 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 939 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 940 941 m = rxs->rxs_mbuf; 942 rxs->rxs_mbuf = NULL; 943 944 sc->sc_rx_cnt--; 945 946 if (rxstat & GEM_RD_BAD_CRC) { 947 ifp->if_ierrors++; 948 #ifdef GEM_DEBUG 949 printf("%s: receive error: CRC error\n", 950 sc->sc_dev.dv_xname); 951 #endif 952 m_freem(m); 953 continue; 954 } 955 956 #ifdef GEM_DEBUG 957 if (ifp->if_flags & IFF_DEBUG) { 958 printf(" rxsoft %p descriptor %d: ", rxs, i); 959 printf("gd_flags: 0x%016llx\t", (long long) 960 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 961 printf("gd_addr: 0x%016llx\n", (long long) 962 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 963 } 964 #endif 965 966 /* No errors; receive the packet. */ 967 len = GEM_RD_BUFLEN(rxstat); 968 969 m->m_data += 2; /* We're already off by two */ 970 971 ifp->if_ipackets++; 972 m->m_pkthdr.rcvif = ifp; 973 m->m_pkthdr.len = m->m_len = len; 974 975 #if NBPFILTER > 0 976 if (ifp->if_bpf) 977 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 978 #endif /* NBPFILTER > 0 */ 979 980 /* Pass it on. */ 981 ether_input_mbuf(ifp, m); 982 } 983 984 /* Update the receive pointer. */ 985 sc->sc_rx_cons = i; 986 gem_fill_rx_ring(sc); 987 bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod); 988 989 DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n", 990 sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION))); 991 992 return (1); 993 } 994 995 void 996 gem_fill_rx_ring(struct gem_softc *sc) 997 { 998 while (sc->sc_rx_cnt < (GEM_NRXDESC - 4)) { 999 if (gem_add_rxbuf(sc, sc->sc_rx_prod)) 1000 break; 1001 } 1002 } 1003 1004 /* 1005 * Add a receive buffer to the indicated descriptor. 
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header; fail softly so the caller may retry. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; M_EXT unset means the cluster allocation failed. */
	MCLGETI(m, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * A load failure here would leave a hole in the RX ring;
		 * the historical answer is to panic.  XXX handle gracefully.
		 */
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Publish the descriptor and advance the software producer index. */
	GEM_INIT_RXDESC(sc, idx);

	sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
	sc->sc_rx_cnt++;

	return (0);
}

/*
 * Error interrupt handler: log the cause.  Always claims the interrupt
 * (returns 1).  MIF interrupts are only reported under GEM_DEBUG.
 */
int
gem_eint(struct gem_softc *sc, u_int status)
{
	if ((status & GEM_INTR_MIF) != 0) {
#ifdef GEM_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}

/*
 * PCS (internal SERDES) interrupt handler.  Always returns 1 (claimed).
 */
int
gem_pint(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;

	/*
	 * Read the PCS interrupt status twice and OR the results —
	 * presumably the register latches and clears on read, so this
	 * acknowledges the interrupt; confirm against the GEM chip docs.
	 */
	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
#ifdef GEM_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}

/*
 * Main interrupt handler.  Reads GEM_STATUS once and dispatches to the
 * PCS, error, TX and RX sub-handlers; returns nonzero if any of them
 * claimed the interrupt.
 */
int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n",
	    sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	/* The TX completion index lives in the high bits of GEM_STATUS. */
	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		/* A fatal TX fault: reinitialize the whole chip. */
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;

			/*
			 * Apparently a silicon bug causes ERI to hang
			 * from time to time.  So if we detect an RX
			 * FIFO overflow, we fire off a timer, and
			 * check whether we're still making progress
			 * by looking at the RX FIFO write and read
			 * pointers.
			 */
			sc->sc_rx_fifo_wr_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}

/*
 * Timer callback armed by gem_intr() on an RX FIFO overflow.  If the
 * MAC is still in the overflow state and the FIFO pointers have not
 * moved since the overflow was detected, assume the chip is wedged and
 * reinitialize it.
 */
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	/* The interface was brought down while the timeout was pending. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
	     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
	      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr)))) {
		/*
		 * The RX state machine is still in overflow state and
		 * the RX FIFO write and read pointers seem to be
		 * stuck.  Whack the chip over the head to get things
		 * going again.
		 */
		gem_init(ifp);
	}
}

/*
 * Transmit watchdog: called by the network stack when a transmit has
 * not completed within ifp->if_timer seconds.  Logs the timeout,
 * counts an output error and reinitializes the chip.
 */
void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode (clear the bit-bang enable). */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */

/*
 * Read a PHY register via the MIF in frame mode.  Polls up to 100us for
 * the turnaround bit; returns the 16-bit register value, or 0 on timeout.
 */
int
gem_mii_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	/* Poll the turnaround bit for completion. */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

/*
 * Write a PHY register via the MIF in frame mode.  Polls up to 100us for
 * completion; logs a message on timeout.
 */
void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	/* Poll the turnaround bit for completion. */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

/*
 * MII status-change callback: reprogram the MAC TX and XIF configuration
 * registers to match the newly negotiated media (duplex and GMII mode).
 */
void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no carrier sense or collision detection. */
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

/*
 * Read a "PHY register" from the on-chip PCS, mapping standard MII
 * register numbers onto the GEM PCS registers.  Only the external PHY
 * address is serviced; other addresses read as 0.
 */
int
gem_pcs_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	case MII_EXTSR:
		/* The PCS always advertises 1000BASE-X capabilities. */
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

/*
 * Write a "PHY register" to the on-chip PCS, mapping standard MII
 * register numbers onto the GEM PCS registers.  Re-enables the PCS
 * after an autonegotiation-advertisement write or a reset.
 */
void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	/* Disable the PCS before changing the advertisement register. */
	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* Wait for a requested PCS reset to self-clear. */
	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}

/*
 * ifmedia change callback: reset all PHYs and set the new media.
 */
int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * ifmedia status callback: report the current media and link status.
 */
void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			gem_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Already running: just update the RX filter
			 * (e.g. a promiscuous-mode change) rather than
			 * resetting the whole chip.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				gem_setladrf(sc);
			else
				gem_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp);
		}
#ifdef GEM_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* A multicast-list change only requires reloading the RX filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			gem_setladrf(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Shutdown hook: stop the chip so it cannot DMA after the kernel
 * hands control over (e.g. at reboot).
 */
void
gem_shutdown(void *arg)
{
	struct gem_softc *sc = (struct gem_softc *)arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp);
}

/*
 * Set up the logical address filter.
 */
void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	for (i = 0; i < 16; i++)
		hash[i] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 * XXX use the addr filter for this
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			v |= GEM_MAC_RX_PROMISC_GRP;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

/*
 * Transmit interrupt.
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

	/* The hardware completion index is in the top bits of GEM_STATUS. */
	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	/* Reclaim every descriptor the hardware has finished with. */
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	/* Ring has room again: clear OACTIVE so gem_start() can queue more. */
	if (sc->sc_tx_cnt < GEM_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* All packets reclaimed: disarm the transmit watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	gem_start(ifp);

	return (1);
}

/*
 * Start output: drain the interface send queue into the TX descriptor
 * ring and kick the chip.  Called with the send queue non-empty or from
 * gem_tint() after descriptors have been reclaimed.
 */
void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int64_t flags;
	bus_dmamap_t map;
	u_int32_t cur, frag, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		/* Peek only; we dequeue once we are committed to sending. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		/* Not enough free descriptors: stall until gem_tint(). */
		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Fill one descriptor per DMA segment. */
		for (i = 0; i < map->dm_nsegs; i++) {
			sc->sc_txdescs[frag].gd_addr =
			    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
			if (i == 0)
				flags |= GEM_TD_START_OF_PACKET;
			if (i == (map->dm_nsegs - 1))
				flags |= GEM_TD_END_OF_PACKET;
			sc->sc_txdescs[frag].gd_flags =
			    GEM_DMA_WRITE(sc, flags);
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
			    BUS_DMASYNC_PREWRITE);
			cur = frag;
			if (++frag == GEM_NTXDESC)
				frag = 0;
		}

		/*
		 * Record the mbuf and its map on the LAST descriptor of
		 * the packet (swapping maps with the first slot), so
		 * gem_tint() frees them only after the whole packet is out.
		 */
		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Tell the chip about the new producer index. */
		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
		sc->sc_tx_prod = frag;

		/* Arm the transmit watchdog. */
		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}