/*	$NetBSD: if_gfe.c,v 1.17 2005/12/11 12:22:16 christos Exp $	*/

/*
 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Allegro Networks, Inc., and Wasabi Systems, Inc.
 * 4. The name of Allegro Networks, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * if_gfe.c -- GT ethernet MAC driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.17 2005/12/11 12:22:16 christos Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <sys/callout.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/marvell/gtintrreg.h>
#include <dev/marvell/gtethreg.h>

#include <dev/marvell/gtvar.h>
#include <dev/marvell/if_gfevar.h>

#define	GE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
#define	GE_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))

#define	GE_DEBUG
#if 0
#define	GE_NOHASH
#define	GE_NORX
#endif

#ifdef GE_DEBUG
#define	GE_DPRINTF(sc, a)	do \
	  if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
	    printf a; \
	while (0)
#define	GE_FUNC_ENTER(sc, func)	GE_DPRINTF(sc, ("[" func))
#define	GE_FUNC_EXIT(sc, str)	GE_DPRINTF(sc, (str "]"))
#else
#define	GE_DPRINTF(sc, a)	do { } while (0)
#define	GE_FUNC_ENTER(sc, func)	do { } while (0)
#define	GE_FUNC_EXIT(sc, str)	do { } while (0)
#endif
enum gfe_whack_op {
	GE_WHACK_START,		GE_WHACK_RESTART,
	GE_WHACK_CHANGE,	GE_WHACK_STOP
};

enum gfe_hash_op {
	GE_HASH_ADD,		GE_HASH_REMOVE,
};

#if 1
#define	htogt32(a)		htobe32(a)
#define	gt32toh(a)		be32toh(a)
#else
#define	htogt32(a)		htole32(a)
#define	gt32toh(a)		le32toh(a)
#endif
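
/*
 * The rx/tx descriptor rings live in DMA memory allocated with
 * BUS_DMA_NOCACHE, but every descriptor access is still bracketed with
 * bus_dmamap_sync() calls.  The macros below sync exactly one
 * descriptor's worth of the ring: POSTREAD|POSTWRITE before the CPU
 * inspects a descriptor the GT may have written, and PREREAD|PREWRITE
 * after the CPU updates one, before handing it back to the GT.
 */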
#define	GE_RXDSYNC(sc, rxq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
	    (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
	    (ops))
#define	GE_RXDPRESYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_RXDPOSTSYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define	GE_TXDSYNC(sc, txq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
	    (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
	    (ops))
#define	GE_TXDPRESYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_TXDPOSTSYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define	STATIC

STATIC int gfe_match (struct device *, struct cfdata *, void *);
STATIC void gfe_attach (struct device *, struct device *, void *);

STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int,
	size_t, int);
STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *);

STATIC int gfe_ifioctl (struct ifnet *, u_long, caddr_t);
STATIC void gfe_ifstart (struct ifnet *);
STATIC void gfe_ifwatchdog (struct ifnet *);

STATIC int gfe_mii_mediachange (struct ifnet *);
STATIC void gfe_mii_mediastatus (struct ifnet *, struct ifmediareq *);
STATIC int gfe_mii_read (struct device *, int, int);
STATIC void gfe_mii_write (struct device *, int, int, int);
STATIC void gfe_mii_statchg (struct device *);

STATIC void gfe_tick(void *arg);

STATIC void gfe_tx_restart(void *);
STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio);
STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t);
STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int);
STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio);
STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio);
STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio);
STATIC int gfe_rx_prime(struct gfe_softc *);
STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t);
STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio);
STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_intr(void *);

STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op,
	enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *,
	u_long);
STATIC int gfe_hash_fill(struct gfe_softc *);
STATIC int gfe_hash_alloc(struct gfe_softc *);

/* Linkup to the rest of the kernel */
CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
    gfe_match, gfe_attach, NULL, NULL);

extern struct cfdriver gfe_cd;

int
gfe_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct gt_softc *gt = (struct gt_softc *) parent;
	struct gt_attach_args *ga = aux;
	uint8_t enaddr[6];

	if (!GT_ETHEROK(gt, ga, &gfe_cd))
		return 0;

	if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0)
		return 0;

	if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 &&
	    enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0)
		return 0;

	return 1;
}

/*
 * Attach this instance, and then all the sub-devices
 */
void
gfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct gt_attach_args * const ga = aux;
	struct gt_softc * const gt = (struct gt_softc *) parent;
	struct gfe_softc * const sc = (struct gfe_softc *) self;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t data;
	uint8_t enaddr[6];
	int phyaddr;
	uint32_t sdcr;
	int error;

	GT_ETHERFOUND(gt, ga);

	sc->sc_gt_memt = ga->ga_memt;
	sc->sc_gt_memh = ga->ga_memh;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_macno = ga->ga_unit;

	if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
	    ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
		aprint_error(": failed to map registers\n");
	}

	callout_init(&sc->sc_co);

	data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
	phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);

	gtget_macaddr(gt, sc->sc_macno, enaddr);

	sc->sc_pcr = GE_READ(sc, EPCR);
	sc->sc_pcxr = GE_READ(sc, EPCXR);
	sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal(": address %s", ether_sprintf(enaddr));

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
	if (sc->sc_dev.dv_cfdata->cf_flags & 1) {
		aprint_normal(", phy %d (rmii)", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else {
		aprint_normal(", phy %d (mii)", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
	if (sc->sc_dev.dv_cfdata->cf_flags & 2)
		sc->sc_flags |= GE_NOFREE;
	sc->sc_pcxr &= ~(3 << 14);
	sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them
		 * to quiesce.
		 */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
		do {
			delay(100);
		} while (tries-- > 0 &&
		    (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
	}

	sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too ***ing bad.
	 */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	sdcr = GE_READ(sc, ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ESDCR, sdcr);
	sc->sc_max_frame_length = 1536;

	aprint_normal("\n");
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfe_mii_read;
	sc->sc_mii.mii_writereg = gfe_mii_write;
	sc->sc_mii.mii_statchg = gfe_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gfe_mii_mediachange,
	    gfe_mii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	/* ifp->if_mowner = &sc->sc_mowner; */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	if (sc->sc_flags & GE_NOFREE) {
		error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
		if (!error)
			error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
		if (!error)
			error = gfe_hash_alloc(sc);
		if (error)
			aprint_error(
			    "%s: failed to allocate resources: %d\n",
			    ifp->if_xname, error);
	}

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, self->dv_xname, RND_TYPE_NET, 0);
#endif
	intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
	    gfe_intr, sc);
}

int
gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs,
	size_t size, int flags)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_dmamem_alloc");

	KASSERT(gdm->gdm_kva == NULL);
	gdm->gdm_size = size;
	gdm->gdm_maxsegs = maxsegs;

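	/*
	 * Standard four-step bus_dma(9) sequence: allocate raw DMA-safe
	 * memory, map it into kernel virtual address space, create a DMA
	 * map, and load the mapping into it.  Any failure falls through
	 * to the common cleanup below.
	 */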
	error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE,
	    gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
	    gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
	    gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
	    gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	/* invalidate from cache */
	bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size,
	    BUS_DMASYNC_PREREAD);
fail:
	if (error) {
		gfe_dmamem_free(sc, gdm);
		GE_DPRINTF(sc, (":err=%d", error));
	}
	GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
	    gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs,
	    gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len));
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
{
	GE_FUNC_ENTER(sc, "gfe_dmamem_free");
	if (gdm->gdm_map)
		bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
	if (gdm->gdm_kva)
		bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
	if (gdm->gdm_nsegs > 0)
		bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
	gdm->gdm_map = NULL;
	gdm->gdm_kva = NULL;
	gdm->gdm_nsegs = 0;
	GE_FUNC_EXIT(sc, "");
}

int
gfe_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	GE_FUNC_ENTER(sc, "gfe_ifioctl");
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			error = gfe_whack(sc, GE_WHACK_START);
			if (error == 0)
				arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_UP|IFF_RUNNING:/* active->active, update */
			error = gfe_whack(sc, GE_WHACK_CHANGE);
			break;
		case IFF_RUNNING:	/* not up, so we stop */
			error = gfe_whack(sc, GE_WHACK_STOP);
			break;
		case IFF_UP:		/* not running, so we start */
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		case 0:			/* idle->idle: do nothing */
			break;
		}
		break;

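	/*
	 * Multicast list changes: ether_addmulti()/ether_delmulti()
	 * return ENETRESET when the hardware filter has to be
	 * recomputed, which we do by whacking the running interface.
	 */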
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI)
		    ? ether_addmulti(ifr, &sc->sc_ec)
		    : ether_delmulti(ifr, &sc->sc_ec);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				error = gfe_whack(sc, GE_WHACK_CHANGE);
			else
				error = 0;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_ifstart(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_ifstart");

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		GE_FUNC_EXIT(sc, "$");
		return;
	}

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_flags &= ~IFF_OACTIVE;
			GE_FUNC_EXIT(sc, "");
			return;
		}

		/*
		 * No space in the pending queue?  Try again later.
		 */
		if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
			break;

		/*
		 * Try to enqueue an mbuf to the device.  If that fails, we
		 * can always try to map the next mbuf.
		 */
		IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
		GE_DPRINTF(sc, (">"));
#ifndef GE_NOTX
		(void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
#endif
	}

	/*
	 * The attempt to queue the mbuf for sending failed, so put it
	 * back on the send queue and mark the interface as active.
	 */
	IF_PREPEND(&ifp->if_snd, m);
	ifp->if_flags |= IFF_OACTIVE;
	GE_FUNC_EXIT(sc, "%%");
}

void
gfe_ifwatchdog(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];

	GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
	printf("%s: device timeout", sc->sc_dev.dv_xname);
	if (ifp->if_flags & IFF_RUNNING) {
		uint32_t curtxdnum;

		curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh,
		    txq->txq_ectdp) - txq->txq_desc_busaddr) /
		    sizeof(txq->txq_descs[0]);
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		GE_TXDPOSTSYNC(sc, txq, curtxdnum);
		printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
		    txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
		    txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
		    GE_READ(sc, EICR));
		GE_TXDPRESYNC(sc, txq, txq->txq_fi);
		GE_TXDPRESYNC(sc, txq, curtxdnum);
	}
	printf("\n");
	ifp->if_oerrors++;
	(void) gfe_whack(sc, GE_WHACK_RESTART);
	GE_FUNC_EXIT(sc, "");
}

int
gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}

	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
	if (error) {
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}
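
/*
 * gfe_rx_rxqinit builds the receive ring for one priority queue:
 * GE_RXDESC_MAX descriptors in a circular list, each owning a
 * GE_RXBUF_SIZE slice of the preallocated receive buffer area.
 */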
int
gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	volatile struct gt_eth_desc *rxd;
	const bus_dma_segment_t *ds;
	int idx;
	bus_addr_t nxtaddr;
	bus_size_t boff;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		int error = gfe_rx_rxqalloc(sc, rxprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	} else {
		KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
		KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
	for (idx = 0, rxd = rxq->rxq_descs,
	     boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
	     nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	     idx < GE_RXDESC_MAX;
	     idx++, rxd++, nxtaddr += sizeof(*rxd)) {
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
		/*
		 * Update the nxtptr to point to the next rxd; the last
		 * descriptor wraps back to the start of the ring.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;
		rxd->ed_nxtptr = htogt32(nxtaddr);
		boff += GE_RXBUF_SIZE;
		if (boff == ds->ds_len) {
			ds++;
			boff = 0;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}
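
/*
 * gfe_rx_get drains completed receive descriptors from one priority
 * ring, copying each frame out of the ring buffer into a fresh mbuf
 * before passing it up.  Descriptors are immediately recycled back to
 * the GT with the ownership bit set.
 */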
void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	struct mbuf *m = rxq->rxq_curpkt;

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not a single buffer packet with no errors
		 * or for some reason it's bigger than our frame size,
		 * ignore it and go to the next packet.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;
			goto give_it_back;
		}

		/* CRC is included with the packet; trim it off. */
		buflen -= ETHER_CRC_LEN;

		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		m->m_data += 2;
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rb_data, buflen);
		m->m_len = buflen;
		m->m_pkthdr.len = buflen;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
			ETHER_ADDR_LEN) == 0) {
			(*ifp->if_input)(ifp, m);
			m = NULL;
			GE_DPRINTF(sc, (">"));
		} else {
			m->m_len = 0;
			m->m_pkthdr.len = 0;
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

	   give_it_back:
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
	rxq->rxq_curpkt = m;
	GE_FUNC_EXIT(sc, "");
}
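
/*
 * RXPRIO_DECODER is a lookup table packed into a constant: for any
 * nonzero 4-bit mask of pending rx queues, (RXPRIO_DECODER >>
 * (rxbits * 2)) & 3 yields the index of the highest set bit, i.e. the
 * highest-priority queue with work.  That bit is then cleared and the
 * queue serviced, replacing a priority-ordered chain of bit tests.
 */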
uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
#define	RXPRIO_DECODER	0xffffaa50
	GE_FUNC_ENTER(sc, "gfe_rx_process");

	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		GE_DPRINTF(sc, ("%1x", rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;
		rxbits &= ~(1 << rxprio);
		rxq = &sc->sc_rxq[rxprio];
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi));
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = &sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

  bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
	GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (rxq->rxq_curpkt)
		m_freem(rxq->rxq_curpkt);
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
		gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "");
}

void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
	do {
		delay(10);
	} while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}
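
/*
 * gfe_tick is the softclock side of the driver: it runs deferred work
 * flagged by the interrupt handler, restarting the ifnet output queue
 * and any receive rings that stalled on an rx error (by rewinding them
 * to descriptor 0), then re-enables the interrupt bits that were
 * masked while those rings idled.
 */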
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		intrmask |= sc->sc_idlemask;
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}
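
/*
 * gfe_tx_enqueue copies the packet at the head of the pending queue
 * into the transmit ring buffer, fills in the next free descriptor,
 * and kicks the SDMA engine.  It returns 1 if a packet was queued and
 * 0 if the descriptor ring or ring buffer was too full to accept it.
 */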
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue?  If not (or if the
	 * txq has not been created yet), punt.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?  If we are
	 * down to the last two free descriptors, try to retire the
	 * oldest in-flight descriptor first.
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		volatile struct gt_eth_desc * const txd2 =
		    &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		cmdsts = gt32toh(txd2->ed_cmdsts);
		if (cmdsts & TX_CMD_O) {
			int nextin;
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the last descriptor.  See if
			 * we own the descriptor after it (since we know
			 * we've turned that one over to the Discovery and,
			 * if we own it now, the Discovery gave it back).
			 * If we do, we know the Discovery gave back this
			 * one but forgot to mark it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "@");
				return 0;
			}
#ifdef DEBUG
			printf("%s: txenqueue: transmitter resynced at %d\n",
			    sc->sc_dev.dv_xname, txq->txq_fi);
#endif
		}
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
		txq->txq_inptr += roundup(pktlen, dcache_line_size);
		txq->txq_nactive--;

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;
		GE_DPRINTF(sc, ("%%"));
	}

	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}

	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

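	/*
	 * Rather than DMA-mapping each mbuf, the driver copies outgoing
	 * packets into a contiguous bounce buffer, rounding each packet
	 * up to a data-cache-line boundary, presumably so no two packets
	 * ever share a cache line.
	 */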
	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	m_copydata(m, 0, m->m_pkthdr.len,
	    txq->txq_buf_mem.gdm_kva + txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * We're done with this mbuf: its data has been copied into the
	 * transmit buffer, so pull it off the pending queue, hand it to
	 * bpf, and free it.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
#endif
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if it needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}
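
/*
 * gfe_tx_done retires descriptors the GT has finished transmitting,
 * updating statistics and the ring-buffer free-space pointer as it
 * goes.  When the queue drains completely it clears the queue's
 * interrupt bits from the returned mask.
 */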
uint32_t
gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	GE_FUNC_ENTER(sc, "gfe_tx_done");

	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return intrmask;
	}

	while (txq->txq_nactive > 0) {
		const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
		volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
			int nextin;

			if (txq->txq_nactive == 1) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the descriptor.  See if we own the
			 * descriptor after it (since we know we've turned
			 * that to the Discovery and if we own it now then the
			 * Discovery gave it back).  If we do, we know the
			 * Discovery gave back this one but forgot to mark it
			 * as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
#ifdef DEBUG
			printf("%s: txdone: transmitter resynced at %d\n",
			    sc->sc_dev.dv_xname, txq->txq_fi);
#endif
		}
#if 0
		GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
		    txq->txq_lo,
		    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
		    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
		GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
		    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
		txq->txq_inptr += roundup(pktlen, dcache_line_size);

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;

		/* txd->ed_bufptr = 0; */

		ifp->if_timer = 5;
		--txq->txq_nactive;
	}
	if (txq->txq_nactive != 0)
		panic("%s: transmit fifo%d empty but active count (%d) > 0!",
		    sc->sc_dev.dv_xname, txprio, txq->txq_nactive);
	ifp->if_timer = 0;
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
	intrmask &= ~(txq->txq_intrbits &
	    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

int
gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");

	error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
	    GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
	if (error) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}

int
gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc *txd;
	unsigned int i;
	bus_addr_t addr;

	GE_FUNC_ENTER(sc, "gfe_tx_start");

	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	if (sc->sc_flags & GE_NOFREE) {
		KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
		KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
	} else {
		int error = gfe_tx_txqalloc(sc, txprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	}

	txq->txq_descs =
	    (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
	txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;

	txq->txq_pendq.ifq_maxlen = 10;
	txq->txq_ei_gapcount = 0;
	txq->txq_nactive = 0;
	txq->txq_fi = 0;
	txq->txq_lo = 0;
	txq->txq_inptr = GE_TXBUF_SIZE;
	txq->txq_outptr = 0;
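	/*
	 * Build the descriptor ring: each descriptor's nxtptr points at
	 * the next one, and the last points back to the first, forming
	 * the circular list the SDMA engine walks.
	 */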
	for (i = 0, txd = txq->txq_descs,
	     addr = txq->txq_desc_busaddr + sizeof(*txd);
	     i < GE_TXDESC_MAX - 1;
	     i++, txd++, addr += sizeof(*txd)) {
		txd->ed_cmdsts = 0;
		txd->ed_nxtptr = htogt32(addr);
	}
	txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
	    htogt32(txq->txq_desc_busaddr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
	    GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	switch (txprio) {
	case GE_TXPRIO_HI:
		txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
		txq->txq_epsrbits = ETH_EPSR_TxHigh;
		txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
		GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_LO:
		txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
		txq->txq_epsrbits = ETH_EPSR_TxLow;
		txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
		GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_NONE:
		break;
	}
#if 0
	GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
	gt_write(sc->sc_dev.dv_parent, txq->txq_ectdp, txq->txq_desc_busaddr);
	GE_DPRINTF(sc, (")"));
#endif

	/*
	 * If we are restarting, there may be packets in the pending queue
	 * waiting to be enqueued.  Try enqueuing packets from both priority
	 * queues until the pending queue is empty or there is no room for
	 * them on the device.
	 */
	while (gfe_tx_enqueue(sc, txprio))
		continue;

	GE_FUNC_EXIT(sc, "");
	return 0;
}

void
gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (!flush) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		gfe_dmamem_free(sc, &txq->txq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "-F");
}

void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}
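
/*
 * gfe_intr loops up to four times so causes that arrive while earlier
 * ones are being serviced get handled in the same invocation.  Each
 * pass acknowledges the pending causes by writing their complement
 * back to EICR, then dispatches to the rx/tx handlers.
 */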
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	for (cnt = 0; cnt < 4; cnt++) {
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#x", cause));
		if (cause == 0)
			break;

		claim = 1;

		GE_WRITE(sc, EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		if (cause & ETH_IR_MIIPhySTC) {
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}

int
gfe_mii_mediachange (struct ifnet *ifp)
{
	struct gfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
gfe_mii_mediastatus (struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gfe_softc *sc = ifp->if_softc;

	if (sc->sc_flags & GE_PHYSTSCHG) {
		sc->sc_flags &= ~GE_PHYSTSCHG;
		mii_pollstat(&sc->sc_mii);
	}
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

int
gfe_mii_read (struct device *self, int phy, int reg)
{
	return gt_mii_read(self, self->dv_parent, phy, reg);
}

void
gfe_mii_write (struct device *self, int phy, int reg, int value)
{
	gt_mii_write(self, self->dv_parent, phy, reg, value);
}

void
gfe_mii_statchg (struct device *self)
{
	/* struct gfe_softc *sc = (struct gfe_softc *) self; */
	/* do nothing? */
}
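
/*
 * gfe_whack is the central state machine for bringing the interface
 * up and down: START and RESTART (re)initialize the rx/tx rings and
 * enable the port, CHANGE rewrites the port configuration and
 * interrupt mask, and STOP disables the port and tears everything
 * down.
 */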
int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;
	}
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}

int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	add0 = ((uint32_t) eaddr[5] <<  0) |
	       ((uint32_t) eaddr[4] <<  8) |
	       ((uint32_t) eaddr[3] << 16);

	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	add1 = ((uint32_t) eaddr[2] <<  0) |
	       ((uint32_t) eaddr[1] <<  8) |
	       ((uint32_t) eaddr[0] << 16);

	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15-bit hash entry address.
	 * ethernetADD is a 48-bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e. MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 *   hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *	XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 *   hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
	} else {
#define	TRIBITFLIP	073516240	/* yes, it's in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 *   hashResult[08:00] = ethernetADD[06:14]
		 *	XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 *   hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#x)", result));
	}
	GE_FUNC_EXIT(sc, "");
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}
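
/*
 * gfe_hash_entry_op adds or removes one address in the hardware hash
 * table using open addressing with linear probing, probing at most
 * HSH_LIMIT slots just as the GT itself does.  Removal only sets the
 * skip bit, so probe chains that pass through the removed slot stay
 * intact.
 */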
int
gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
	enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint64_t he;
	uint64_t *maybe_he_p = NULL;
	int limit;
	int hash;
	int maybe_hash = 0;

	GE_FUNC_ENTER(sc, "gfe_hash_entry_op");

	hash = gfe_hash_compute(sc, eaddr);

	if (sc->sc_hashtable == NULL) {
		panic("%s:%d: hashtable == NULL!", sc->sc_dev.dv_xname,
			__LINE__);
	}

	/*
	 * Assume we are going to insert, so create the hash entry we
	 * are going to insert.  We also use it to match entries we
	 * will be removing.
	 */
	he = ((uint64_t) eaddr[5] << 43) |
	     ((uint64_t) eaddr[4] << 35) |
	     ((uint64_t) eaddr[3] << 27) |
	     ((uint64_t) eaddr[2] << 19) |
	     ((uint64_t) eaddr[1] << 11) |
	     ((uint64_t) eaddr[0] <<  3) |
	     HSH_PRIO_INS(prio) | HSH_V | HSH_R;

	/*
	 * The GT will search up to 12 entries for a hit, so we must
	 * mimic that.
	 */
	hash &= sc->sc_hashmask / sizeof(he);
	for (limit = HSH_LIMIT; limit > 0 ; --limit) {
		/*
		 * Does the GT wrap at the end, stop at the end, or overrun
		 * the end?  Assume it wraps for now.  Stash a copy of the
		 * current hash entry.
		 */
		uint64_t *he_p = &sc->sc_hashtable[hash];
		uint64_t thishe = *he_p;

		/*
		 * If the hash entry isn't valid, that breaks the chain, and
		 * this entry is a good candidate for reuse.
		 */
		if ((thishe & HSH_V) == 0) {
			maybe_he_p = he_p;
			break;
		}

		/*
		 * If the hash entry has the same address we are looking for
		 * then ...  if we are removing and the skip bit is set, it's
		 * already been removed.  If we are adding and the skip bit
		 * is clear, then it's already added.  In either case, return
		 * EBUSY indicating the op has already been done.  Otherwise
		 * flip the skip bit and return 0.
		 */
		if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
			if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
			    ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
				return EBUSY;
			*he_p = thishe ^ HSH_S;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
			    hash * sizeof(he), sizeof(he),
			    BUS_DMASYNC_PREWRITE);
			GE_FUNC_EXIT(sc, "^");
			return 0;
		}

		/*
		 * If we haven't found a slot for the entry and this entry
		 * is currently being skipped, return this entry.
		 */
		if (maybe_he_p == NULL && (thishe & HSH_S)) {
			maybe_he_p = he_p;
			maybe_hash = hash;
		}

		hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
	}

	/*
	 * If we got here, then there was no entry to remove.
	 */
	if (op == GE_HASH_REMOVE) {
		GE_FUNC_EXIT(sc, "?");
		return ENOENT;
	}

	/*
	 * If we couldn't find a slot, return an error.
	 */
	if (maybe_he_p == NULL) {
		GE_FUNC_EXIT(sc, "!");
		return ENOSPC;
	}

	/* Update the entry. */
	*maybe_he_p = he;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "+");
	return 0;
}
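
/*
 * gfe_hash_multichg handles the addition or removal of one multicast
 * entry in the hardware hash table.  Address ranges can't be
 * represented in the table, so they force the port into promiscuous
 * mode instead; ENETRESET tells the caller to regenerate the table.
 */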
int
gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm,
	u_long cmd)
{
	struct gfe_softc * const sc = ec->ec_if.if_softc;
	int error;
	enum gfe_hash_op op;
	enum gfe_rxprio prio;

	GE_FUNC_ENTER(sc, "hash_multichg");
	/*
	 * Is this a wildcard entry?  If so and it's being removed, recompute.
	 */
	if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
		if (cmd == SIOCDELMULTI) {
			GE_FUNC_EXIT(sc, "");
			return ENETRESET;
		}

		/*
		 * Switch into promiscuous mode, since an address range
		 * can't be matched by the hash table.
		 */
		sc->sc_flags |= GE_ALLMULTI;
		if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
			sc->sc_pcr |= ETH_EPCR_PM;
			GE_WRITE(sc, EPCR, sc->sc_pcr);
			GE_FUNC_EXIT(sc, "");
			return 0;
		}
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}

	prio = GE_RXPRIO_MEDLO;
	op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);

	if (sc->sc_hashtable == NULL) {
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
	if (error == EBUSY) {
		printf("%s: multichg: tried to %s %s again\n",
		    sc->sc_dev.dv_xname,
		    cmd == SIOCDELMULTI ? "remove" : "add",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOENT) {
		printf("%s: multichg: failed to remove %s: not in table\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOSPC) {
		printf("%s: multichg:"
		    " failed to add %s: no space; regenerating table\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}
	GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
	    sc->sc_dev.dv_xname,
	    cmd == SIOCDELMULTI ? "remove" : "add",
	    ether_sprintf(enm->enm_addrlo)));
	GE_FUNC_EXIT(sc, "");
	return 0;
}

int
gfe_hash_fill(struct gfe_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	int error;

	GE_FUNC_ENTER(sc, "gfe_hash_fill");

	error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
	    LLADDR(sc->sc_ec.ec_if.if_sadl));
	if (error) {
		GE_FUNC_EXIT(sc, "!");
		return error;
	}

	sc->sc_flags &= ~GE_ALLMULTI;
	if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
		sc->sc_pcr &= ~ETH_EPCR_PM;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			sc->sc_flags |= GE_ALLMULTI;
			sc->sc_pcr |= ETH_EPCR_PM;
		} else {
			error = gfe_hash_entry_op(sc, GE_HASH_ADD,
			    GE_RXPRIO_MEDLO, enm->enm_addrlo);
			if (error == ENOSPC)
				break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}

	GE_FUNC_EXIT(sc, "");
	return error;
}

int
gfe_hash_alloc(struct gfe_softc *sc)
{
	int error;
	GE_FUNC_ENTER(sc, "gfe_hash_alloc");
	sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1;
	error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1,
	    BUS_DMA_NOCACHE);
	if (error) {
		printf("%s: failed to allocate %d bytes for hash table: %d\n",
		    sc->sc_dev.dv_xname, sc->sc_hashmask + 1, error);
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva;
	memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "");
	return 0;
}