/*	$OpenBSD: gem.c,v 1.78 2008/09/10 14:01:22 blambert Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void gem_start(struct ifnet *);
void gem_stop(struct ifnet *, int);
int gem_ioctl(struct ifnet *, u_long, caddr_t);
void gem_tick(void *);
void gem_watchdog(struct ifnet *);
void gem_shutdown(void *);
int gem_init(struct ifnet *);
void gem_init_regs(struct gem_softc *);
int gem_ringsize(int);
int gem_meminit(struct gem_softc *);
void gem_mifinit(struct gem_softc *);
int gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
    u_int32_t, u_int32_t);
void gem_reset(struct gem_softc *);
int gem_reset_rx(struct gem_softc *);
int gem_reset_tx(struct gem_softc *);
int gem_disable_rx(struct gem_softc *);
int gem_disable_tx(struct gem_softc *);
void gem_rxdrain(struct gem_softc *);
int gem_add_rxbuf(struct gem_softc *, int idx);
void gem_setladrf(struct gem_softc *);

/* MII methods & callbacks */
int gem_mii_readreg(struct device *, int, int);
void gem_mii_writereg(struct device *, int, int, int);
void gem_mii_statchg(struct device *);
int gem_pcs_readreg(struct device *, int, int);
void gem_pcs_writereg(struct device *, int, int, int);

int gem_mediachange(struct ifnet *);
void gem_mediastatus(struct ifnet *, struct ifmediareq *);

struct mbuf *gem_get(struct gem_softc *, int, int);
int gem_eint(struct gem_softc *, u_int);
int gem_rint(struct gem_softc *);
int gem_tint(struct gem_softc *, u_int32_t);
int gem_pint(struct gem_softc *);

#ifdef GEM_DEBUG
#define DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define DPRINTF(sc, x)	/* nothing */
#endif

/*
 * Attach a Gem interface to the system.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);
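
	/*
	 * The control data (descriptor rings) is set up with the usual
	 * four-step bus_dma(9) sequence: bus_dmamem_alloc() to obtain
	 * DMA-safe memory, bus_dmamem_map() to map it into kernel VA,
	 * then bus_dmamap_create() and bus_dmamap_load() to get the bus
	 * addresses.  The fail_* labels at the end of this function
	 * unwind these steps in reverse order.
	 */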

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
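
	/*
	 * The GEM_RX_FIFO_SIZE register counts in 64-byte units; scale
	 * it to bytes here, since gem_init() derives the RX pause
	 * thresholds from this value.
	 */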
	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);
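
	/*
	 * PHY probe order: try an external PHY on MDI1 first, then fall
	 * back to an internal PHY on MDI0, and finally to the PCS SERDES
	 * (not present on ERI).  Whichever attaches first wins.
	 */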

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("gem_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}


void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
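
/*
 * Poll a register until all bits in "clr" have cleared and all bits in
 * "set" are set.  Returns 1 on success, 0 on timeout.  With TRIES ==
 * 10000 and a 100us delay per try, the worst case is roughly one second.
 */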
int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}


/*
 * Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		gem_rxdrain(sc);
}


/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}
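
/*
 * Map a descriptor count to the ring size encoding shared by the TX and
 * RX configuration registers; gem_init() runs both GEM_NTXDESC and
 * GEM_NRXDESC through this lookup.
 */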
int
gem_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return GEM_RING_SZ_32;
	case 64:
		return GEM_RING_SZ_64;
	case 128:
		return GEM_RING_SZ_128;
	case 256:
		return GEM_RING_SZ_256;
	case 512:
		return GEM_RING_SZ_512;
	case 1024:
		return GEM_RING_SZ_1024;
	case 2048:
		return GEM_RING_SZ_2048;
	case 4096:
		return GEM_RING_SZ_4096;
	case 8192:
		return GEM_RING_SZ_8192;
	default:
		printf("gem: invalid descriptor ring size %d\n", sz);
		return GEM_RING_SZ_32;
	}
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int max_frame_size;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x4ff<<10)&GEM_TX_CONFIG_TXFIFO_TH));
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
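
	/*
	 * The pause threshold fields count in 64-byte FIFO units, so
	 * sc_rxfifosize / 256 is one quarter of the FIFO expressed in
	 * those units; hence the OFF threshold written below is 3/4 of
	 * the FIFO and the ON threshold 1/4.
	 */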
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {
		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}

/*
 * Receive interrupt.
 */
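/*
 * Free descriptors are handed to the chip with GEM_RD_OWN set in their
 * flags word; the chip clears that bit once it has filled the buffer.
 * The scan below therefore stops at the first descriptor the hardware
 * still owns.
 */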
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
#endif
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		/*
		 * We're already off by two: the chip deposits frames at
		 * a 2-byte offset (FBOFF is 2 in GEM_RX_CONFIG), which
		 * 4-byte aligns the IP header.
		 */
		m->m_data += 2;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}


/*
 * Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}


int
gem_eint(struct gem_softc *sc, u_int status)
{
	if ((status & GEM_INTR_MIF) != 0) {
#ifdef GEM_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}

int
gem_pint(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
#ifdef GEM_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}

int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
	    sc->sc_dev.dv_xname, (status >> 19), status, GEM_INTR_BITS));

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		/*
		 * On some chip revisions GEM_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			gem_init(ifp);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}


void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
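/*
 * This driver uses frame mode only: gem_mifinit() clears
 * GEM_MIF_CONFIG_BB_ENA, and the two routines below build a frame in
 * GEM_MIF_FRAME and busy-wait on GEM_MIF_FRAME_TA0 for completion.
 */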
int
gem_mii_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_pcs_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			gem_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else {
				if ((ifp->if_flags & IFF_RUNNING) == 0)
					gem_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;

#ifdef GEM_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				gem_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	splx(s);
	return (error);
}


void
gem_shutdown(void *arg)
{
	struct gem_softc *sc = (struct gem_softc *)arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 1);
}

/*
 * Set up the logical address filter.
 */
void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */
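	/*
	 * For example, a CRC whose top byte is 0xab gives index 0xab:
	 * bit 0xb of hash word 0xa, which (counting bit 0 from the MSB)
	 * is mask 1 << (15 - 0xb) -- exactly what the loop below computes.
	 */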

	/* Clear hash table */
	for (i = 0; i < 16; i++)
		hash[i] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 * XXX use the addr filter for this
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			v |= GEM_MAC_RX_PROMISC_GRP;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

/*
 * Transmit interrupt.
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

	/* The TX completion index lives in the upper bits of GEM_STATUS. */
	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < GEM_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	gem_start(ifp);

	return (1);
}

void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	u_int64_t flags;
	bus_dmamap_t map;
	u_int32_t cur, frag, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;
		m0 = NULL;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			MGETHDR(m0, M_DONTWAIT, MT_DATA);
			if (m0 == NULL)
				goto drop;
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(m0, M_DONTWAIT);
				if (!(m0->m_flags & M_EXT)) {
					m_freem(m0);
					goto drop;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
			m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m0,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(m0);
				goto drop;
			}
		}

		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			if (m0 != NULL)
				m_freem(m0);
			break;
		}

		/* We are now committed to transmitting the packet. */

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m0 != NULL) {
			m_freem(m);
			m = m0;
		}

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
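
		/*
		 * One descriptor per DMA segment: the first gets
		 * START_OF_PACKET, the last END_OF_PACKET.  "cur" trails
		 * the loop to remember the final descriptor, where the
		 * mbuf and its map are parked for gem_tint() to reclaim.
		 */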
		for (i = 0; i < map->dm_nsegs; i++) {
			sc->sc_txdescs[frag].gd_addr =
			    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
			if (i == 0)
				flags |= GEM_TD_START_OF_PACKET;
			if (i == (map->dm_nsegs - 1))
				flags |= GEM_TD_END_OF_PACKET;
			sc->sc_txdescs[frag].gd_flags =
			    GEM_DMA_WRITE(sc, flags);
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
			    BUS_DMASYNC_PREWRITE);
			cur = frag;
			if (++frag == GEM_NTXDESC)
				frag = 0;
		}

		sc->sc_tx_cnt += map->dm_nsegs;
		/* Swap maps so the loaded one stays with the mbuf at "cur". */
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}