/*	$OpenBSD: gem.c,v 1.102 2014/03/14 11:04:24 dlg Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $	*/

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define	TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start(struct ifnet *);
void		gem_stop(struct ifnet *, int);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rx_watchdog(void *);
void		gem_rxdrain(struct gem_softc *);
void		gem_fill_rx_ring(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
void		gem_iff(struct gem_softc *);

/* MII methods & callbacks */
int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);
void		gem_rxcksum(struct mbuf *, u_int64_t);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

/*
 * Attach a Gem interface to the system.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, mii_flags, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Hardware reads RX descriptors in multiples of four. */
	m_clsetwms(ifp, MCLBYTES, 4, GEM_NRXDESC - 4);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	mii_flags = MIIF_DOPAUSE;

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
	 * trusted when the firmware has powered down the chip
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    (sc->sc_mif_config & GEM_MIF_CONFIG_MDI0 || GEM_IS_APPLE(sc))) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_flags |= MIIF_NOISOLATE;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, mii_flags);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}

void
gem_unconfig(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	gem_stop(ifp, 1);

	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}
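
/*
 * Once-a-second tick: fold the MAC's collision and receive error
 * counters into the interface statistics, clear the hardware
 * counters, and let the MII layer poll the PHY for link changes.
 */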
void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
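
/*
 * Busy-wait on register r of handle h until all bits in clr read
 * back as zero and all bits in set read back as one.  Returns 1 on
 * success, 0 if the condition is not met after TRIES polls of
 * 100us each (roughly one second in total).
 */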
int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}

/*
 * Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0;
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int softonly)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		gem_reset_rx(sc);
		gem_reset_tx(sc);
	}

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	gem_rxdrain(sc);
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize the transmit and receive descriptor rings.
 */
int
gem_meminit(struct gem_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		sc->sc_rxdescs[i].gd_flags = 0;
		sc->sc_rxdescs[i].gd_addr = 0;
	}
	gem_fill_rx_ring(sc);

	return (0);
}
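
/*
 * Translate a ring size in descriptors into the corresponding
 * GEM_RING_SZ_* encoding for the TX/RX configuration registers;
 * unsupported sizes fall back to the smallest encoding.
 */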
int
gem_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return GEM_RING_SZ_32;
	case 64:
		return GEM_RING_SZ_64;
	case 128:
		return GEM_RING_SZ_128;
	case 256:
		return GEM_RING_SZ_256;
	case 512:
		return GEM_RING_SZ_512;
	case 1024:
		return GEM_RING_SZ_1024;
	case 2048:
		return GEM_RING_SZ_2048;
	case 4096:
		return GEM_RING_SZ_4096;
	case 8192:
		return GEM_RING_SZ_8192;
	default:
		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
		return GEM_RING_SZ_32;
	}
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);
	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks. This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
	if (sc->sc_pci)
		v |= GEM_CONFIG_BURST_INF;
	else
		v |= GEM_CONFIG_BURST_64;
	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
	bus_space_write_4(t, h, GEM_CONFIG, v);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}
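
/*
 * The chip leaves a ones-complement sum of each received packet in
 * the RX descriptor, computed from the checksum start offset that
 * gem_init() programs into GEM_RX_CONFIG (the end of a plain IP
 * header).  gem_rxcksum() below folds any IP option words back out
 * of that raw sum, folds in the pseudo-header, and marks the mbuf
 * checksum-OK if the result comes out to zero.
 */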
/*
 * RX TCP/UDP checksum
 */
void
gem_rxcksum(struct mbuf *m, u_int64_t rxstat)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;
	union pseudoh {
		struct hdr {
			u_int16_t len;
			u_int8_t ttl;
			u_int8_t proto;
			u_int32_t src;
			u_int32_t dst;
		} h;
		u_int16_t w[6];
	} ph;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = htons(~(rxstat & GEM_RD_CHECKSUM));
	/* cksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}

	ph.h.len = htons(ntohs(ip->ip_len) - hlen);
	ph.h.ttl = 0;
	ph.h.proto = ip->ip_p;
	ph.h.src = ip->ip_src.s_addr;
	ph.h.dst = ip->ip_dst.s_addr;
	temp32 = cksum;
	opts = &ph.w[0];
	temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5];
	temp32 = (temp32 >> 16) + (temp32 & 65535);
	temp32 += (temp32 >> 16);
	cksum = ~temp32;
	if (cksum == 0) {
		m->m_pkthdr.csum_flags |=
		    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

/*
 * Receive interrupt.
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rx_cons; sc->sc_rx_cnt > 0; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/* We have processed all of the receive buffers. */
			break;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		sc->sc_rx_cnt--;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		gem_rxcksum(m, rxstat);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rx_cons = i;
	gem_fill_rx_ring(sc);
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
	    sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}
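
/*
 * Top up the receive ring.  Four descriptors are always left
 * unfilled because the hardware reads RX descriptors in multiples
 * of four (see the watermarks set up in gem_config()).
 */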
void
gem_fill_rx_ring(struct gem_softc *sc)
{
	while (sc->sc_rx_cnt < (GEM_NRXDESC - 4)) {
		if (gem_add_rxbuf(sc, sc->sc_rx_prod))
			break;
	}
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
	sc->sc_rx_cnt++;

	return (0);
}
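
/*
 * Error interrupt.  A MIF interrupt merely signals a link change;
 * everything else gets logged along with the raw status bits.
 */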
int
gem_eint(struct gem_softc *sc, u_int status)
{
	if ((status & GEM_INTR_MIF) != 0) {
#ifdef GEM_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}

int
gem_pint(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
#ifdef GEM_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}
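
/*
 * Main interrupt handler.  Read the status register once and
 * dispatch to the PCS, error, transmit and receive handlers, then
 * inspect the TX/RX MAC status registers for faults that call for
 * a reinitialization or for arming the RX overflow watchdog.
 */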
int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
	    sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	if (status == 0xffffffff)
		return (0);

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;

			/*
			 * Apparently a silicon bug causes ERI to hang
			 * from time to time.  So if we detect an RX
			 * FIFO overflow, we fire off a timer, and
			 * check whether we're still making progress
			 * by looking at the RX FIFO write and read
			 * pointers.
			 */
			sc->sc_rx_fifo_wr_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}

void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
	    ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
	    (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr)))) {
		/*
		 * The RX state machine is still in overflow state and
		 * the RX FIFO write and read pointers seem to be
		 * stuck.  Whack the chip over the head to get things
		 * going again.
		 */
		gem_init(ifp);
	}
}
void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
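
/*
 * This driver uses frame mode: gem_mifinit() clears the bitbang
 * enable bit, and the two routines below busy-wait on the frame
 * register's turnaround bit (GEM_MIF_FRAME_TA0) for completion.
 */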
int
gem_mii_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);

	/*
	 * 802.3x flow control
	 */
	v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG);
	v &= ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
}
int
gem_pcs_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			gem_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				gem_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
#ifdef GEM_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			gem_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
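
/*
 * Program the receive filter: promiscuous mode, all-multicast, or
 * the 256-bit multicast hash filter, depending on the interface
 * flags and the multicast list.
 */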
void
gem_iff(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
	    GEM_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= GEM_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
}

/*
 * Transmit interrupt.
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < GEM_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	gem_start(ifp);

	return (1);
}
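
/*
 * Start transmission.  Map packets from the send queue onto free
 * TX descriptors until the queue is empty or the ring is nearly
 * full, then kick the transmitter.  The DMA map of the first
 * descriptor slot is swapped with that of the last one used, so
 * the loaded map ends up in the slot that also holds the mbuf and
 * both are released together in gem_tint().
 */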
void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int64_t flags;
	bus_dmamap_t map;
	u_int32_t cur, frag, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			GEM_DMA_WRITE(sc, &sc->sc_txdescs[frag].gd_addr,
			    map->dm_segs[i].ds_addr);
			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
			if (i == 0)
				flags |= GEM_TD_START_OF_PACKET;
			if (i == (map->dm_nsegs - 1))
				flags |= GEM_TD_END_OF_PACKET;
			GEM_DMA_WRITE(sc, &sc->sc_txdescs[frag].gd_flags,
			    flags);
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
			    BUS_DMASYNC_PREWRITE);
			cur = frag;
			if (++frag == GEM_NTXDESC)
				frag = 0;
		}

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}