/*	$NetBSD: if_gfe.c,v 1.31 2008/11/07 00:20:07 dyoung Exp $	*/

/*
 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Allegro Networks, Inc., and Wasabi Systems, Inc.
 * 4. The name of Allegro Networks, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * if_gfe.c -- GT ethernet MAC driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.31 2008/11/07 00:20:07 dyoung Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <sys/callout.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/marvell/gtintrreg.h>
#include <dev/marvell/gtethreg.h>

#include <dev/marvell/gtvar.h>
#include <dev/marvell/if_gfevar.h>

#define	GE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
#define	GE_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))

#define	GE_DEBUG
#if 0
#define	GE_NOHASH
#define	GE_NORX
#endif

#ifdef GE_DEBUG
#define	GE_DPRINTF(sc, a)	do \
				  if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
				    printf a; \
				while (0)
#define	GE_FUNC_ENTER(sc, func)	GE_DPRINTF(sc, ("[" func))
#define	GE_FUNC_EXIT(sc, str)	GE_DPRINTF(sc, (str "]"))
#else
#define	GE_DPRINTF(sc, a)	do { } while (0)
#define	GE_FUNC_ENTER(sc, func)	do { } while (0)
#define	GE_FUNC_EXIT(sc, str)	do { } while (0)
#endif
enum gfe_whack_op {
	GE_WHACK_START,		GE_WHACK_RESTART,
	GE_WHACK_CHANGE,	GE_WHACK_STOP
};
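/*
 * Reading gfe_whack() below: GE_WHACK_START brings the interface up
 * from scratch, GE_WHACK_RESTART reinitializes the transmit side after
 * a watchdog timeout, GE_WHACK_CHANGE pushes updated control-register
 * and interrupt-mask settings to a running interface, and
 * GE_WHACK_STOP tears everything down.
 */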
enum gfe_hash_op {
	GE_HASH_ADD,		GE_HASH_REMOVE,
};

#if 1
#define	htogt32(a)		htobe32(a)
#define	gt32toh(a)		be32toh(a)
#else
#define	htogt32(a)		htole32(a)
#define	gt32toh(a)		le32toh(a)
#endif

#define	GE_RXDSYNC(sc, rxq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
	    (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
	    (ops))
#define	GE_RXDPRESYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_RXDPOSTSYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define	GE_TXDSYNC(sc, txq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
	    (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
	    (ops))
#define	GE_TXDPRESYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_TXDPOSTSYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

/* Defined empty, presumably so these normally-static functions stay
 * visible to the kernel debugger. */
#define	STATIC

STATIC int gfe_match (struct device *, struct cfdata *, void *);
STATIC void gfe_attach (struct device *, struct device *, void *);

STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int,
	size_t, int);
STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *);

STATIC int gfe_ifioctl (struct ifnet *, u_long, void *);
STATIC void gfe_ifstart (struct ifnet *);
STATIC void gfe_ifwatchdog (struct ifnet *);

STATIC int gfe_mii_read (struct device *, int, int);
STATIC void gfe_mii_write (struct device *, int, int, int);
STATIC void gfe_mii_statchg (struct device *);

STATIC void gfe_tick(void *arg);

STATIC void gfe_tx_restart(void *);
STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio);
STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t);
STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int);
STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio);
STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio);
STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio);
STATIC int gfe_rx_prime(struct gfe_softc *);
STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t);
STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio);
STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_intr(void *);

STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op,
	enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *,
	u_long);
STATIC int gfe_hash_fill(struct gfe_softc *);
STATIC int gfe_hash_alloc(struct gfe_softc *);

/* Link up to the rest of the kernel */
CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
    gfe_match, gfe_attach, NULL, NULL);

extern struct cfdriver gfe_cd;

int
gfe_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct gt_softc *gt = (struct gt_softc *) parent;
	struct gt_attach_args *ga = aux;
	uint8_t enaddr[6];

	if (!GT_ETHEROK(gt, ga, &gfe_cd))
		return 0;

	if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0)
		return 0;

	if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 &&
	    enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0)
		return 0;

	return 1;
}

/*
 * Attach this instance, and then all the sub-devices
 */
void
gfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct gt_attach_args * const ga = aux;
	struct gt_softc * const gt = device_private(parent);
	struct gfe_softc * const sc = device_private(self);
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t data;
	uint8_t enaddr[6];
	int phyaddr;
	uint32_t sdcr;
	int error;

	GT_ETHERFOUND(gt, ga);

	sc->sc_gt_memt = ga->ga_memt;
	sc->sc_gt_memh = ga->ga_memh;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_macno = ga->ga_unit;

	if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
	    ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
		aprint_error(": failed to map registers\n");
	}

	callout_init(&sc->sc_co, 0);

	data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
	phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);

	gtget_macaddr(gt, sc->sc_macno, enaddr);

	sc->sc_pcr = GE_READ(sc, EPCR);
	sc->sc_pcxr = GE_READ(sc, EPCXR);
	sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal(": address %s", ether_sprintf(enaddr));

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
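	/*
	 * Two config-file flag bits steer the attachment: bit 0 selects
	 * RMII rather than MII for the PHY connection, and bit 1 sets
	 * GE_NOFREE, which makes the driver keep its DMA memory across
	 * down/up cycles instead of freeing and reallocating it.
	 */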
	if (device_cfdata(&sc->sc_dev)->cf_flags & 1) {
		aprint_normal(", phy %d (rmii)", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else {
		aprint_normal(", phy %d (mii)", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
	if (device_cfdata(&sc->sc_dev)->cf_flags & 2)
		sc->sc_flags |= GE_NOFREE;
	sc->sc_pcxr &= ~(3 << 14);
	sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them to quiesce
		 */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
		do {
			delay(100);
		} while (tries-- > 0 &&
		    (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
	}

	sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too ***ing bad.
	 */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	sdcr = GE_READ(sc, ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ESDCR, sdcr);
	sc->sc_max_frame_length = 1536;

	aprint_normal("\n");
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfe_mii_read;
	sc->sc_mii.mii_writereg = gfe_mii_write;
	sc->sc_mii.mii_statchg = gfe_mii_statchg;

	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	/* ifp->if_mowner = &sc->sc_mowner; */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	if (sc->sc_flags & GE_NOFREE) {
		error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
		if (!error)
			error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
		if (!error)
			error = gfe_hash_alloc(sc);
		if (error)
			aprint_error(
			    "%s: failed to allocate resources: %d\n",
			    ifp->if_xname, error);
	}

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif
	intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
	    gfe_intr, sc);
}

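/*
 * gfe_dmamem_alloc() runs the canonical four-step bus_dma(9) setup --
 * bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_create, bus_dmamap_load
 * -- to give the driver one region that is both kernel-visible and
 * device-visible; gfe_dmamem_free() unwinds whichever steps succeeded.
 */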
int
gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs,
	size_t size, int flags)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_dmamem_alloc");

	KASSERT(gdm->gdm_kva == NULL);
	gdm->gdm_size = size;
	gdm->gdm_maxsegs = maxsegs;

	error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE,
	    gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
	    gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
	    gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
	    gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	/* invalidate from cache */
	bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size,
	    BUS_DMASYNC_PREREAD);
fail:
	if (error) {
		gfe_dmamem_free(sc, gdm);
		GE_DPRINTF(sc, (":err=%d", error));
	}
	GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
	    gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs,
	    gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len));
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
{
	GE_FUNC_ENTER(sc, "gfe_dmamem_free");
	if (gdm->gdm_map)
		bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
	if (gdm->gdm_kva)
		bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
	if (gdm->gdm_nsegs > 0)
		bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
	gdm->gdm_map = NULL;
	gdm->gdm_kva = NULL;
	gdm->gdm_nsegs = 0;
	GE_FUNC_EXIT(sc, "");
}

int
gfe_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	GE_FUNC_ENTER(sc, "gfe_ifioctl");
	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		error = gfe_whack(sc, GE_WHACK_START);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (error == 0)
				arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_UP|IFF_RUNNING:/* active->active, update */
			error = gfe_whack(sc, GE_WHACK_CHANGE);
			break;
		case IFF_RUNNING:	/* not up, so we stop */
			error = gfe_whack(sc, GE_WHACK_STOP);
			break;
		case IFF_UP:		/* not running, so we start */
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		case 0:			/* idle->idle: do nothing */
			break;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				error = gfe_whack(sc, GE_WHACK_CHANGE);
			else
				error = 0;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	splx(s);
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_ifstart(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_ifstart");

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		GE_FUNC_EXIT(sc, "$");
		return;
	}

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_flags &= ~IFF_OACTIVE;
			GE_FUNC_EXIT(sc, "");
			return;
		}

		/*
		 * No space in the pending queue?  Try again later.
		 */
		if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
			break;

		/*
		 * Try to enqueue an mbuf to the device.  If that fails, we
		 * can always try to map the next mbuf.
		 */
		IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
		GE_DPRINTF(sc, (">"));
#ifndef GE_NOTX
		(void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
#endif
	}

	/*
	 * The attempt to queue the mbuf for sending failed.
	 */
	IF_PREPEND(&ifp->if_snd, m);
	ifp->if_flags |= IFF_OACTIVE;
	GE_FUNC_EXIT(sc, "%%");
}

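/*
 * Transmit is two-stage: gfe_ifstart() above only moves packets from
 * the interface send queue onto the high-priority pending queue, and
 * gfe_tx_enqueue() later copies each pending packet into the DMA
 * staging buffer and hands a descriptor to the GT.
 */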
void
gfe_ifwatchdog(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];

	GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
	printf("%s: device timeout", device_xname(&sc->sc_dev));
	if (ifp->if_flags & IFF_RUNNING) {
		uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt,
		    sc->sc_gt_memh, txq->txq_ectdp) -
		    txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]);
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		GE_TXDPOSTSYNC(sc, txq, curtxdnum);
		printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
		    txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
		    txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
		    GE_READ(sc, EICR));
		GE_TXDPRESYNC(sc, txq, txq->txq_fi);
		GE_TXDPRESYNC(sc, txq, curtxdnum);
	}
	printf("\n");
	ifp->if_oerrors++;
	(void) gfe_whack(sc, GE_WHACK_RESTART);
	GE_FUNC_EXIT(sc, "");
}

int
gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}

	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
	if (error) {
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}

int
gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	volatile struct gt_eth_desc *rxd;
	const bus_dma_segment_t *ds;
	int idx;
	bus_addr_t nxtaddr;
	bus_size_t boff;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		int error = gfe_rx_rxqalloc(sc, rxprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	} else {
		KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
		KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
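	/*
	 * Build the receive ring: each descriptor's length field holds
	 * the buffer size, its command/status word starts out
	 * F|L|O|EI (first, last, GT-owned and, presumably,
	 * interrupt-enable), its buffer pointer walks through the rx
	 * buffer segments, and its next pointer links to the following
	 * descriptor, with the last one wrapping back to the first.
	 */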
	for (idx = 0, rxd = rxq->rxq_descs,
	     boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
	     nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	     idx < GE_RXDESC_MAX;
	     idx++, rxd++, nxtaddr += sizeof(*rxd)) {
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
		/*
		 * update the nxtptr to point to the next rxd.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;
		rxd->ed_nxtptr = htogt32(nxtaddr);
		boff += GE_RXBUF_SIZE;
		if (boff == ds->ds_len) {
			ds++;
			boff = 0;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}

void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	struct mbuf *m = rxq->rxq_curpkt;

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not an error-free single-buffer packet, or
		 * it is somehow bigger than our frame size, count an
		 * input error and recycle the descriptor.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;
			goto give_it_back;
		}

		/* CRC is included with the packet; trim it off. */
		buflen -= ETHER_CRC_LEN;

		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
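		/*
		 * Offset the payload by two bytes so the 14-byte
		 * Ethernet header leaves the IP header 32-bit aligned.
		 */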
		m->m_data += 2;
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rxb_data, buflen);
		m->m_len = buflen;
		m->m_pkthdr.len = buflen;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl),
			ETHER_ADDR_LEN) == 0) {
			(*ifp->if_input)(ifp, m);
			m = NULL;
			GE_DPRINTF(sc, (">"));
		} else {
			m->m_len = 0;
			m->m_pkthdr.len = 0;
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

	give_it_back:
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
	rxq->rxq_curpkt = m;
	GE_FUNC_EXIT(sc, "");
}

uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
#define	RXPRIO_DECODER	0xffffaa50
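	/*
	 * RXPRIO_DECODER is a packed priority encoder: for a 4-bit mask
	 * n of rx queues wanting service, bits [2n+1:2n] of 0xffffaa50
	 * hold the number of the highest queue set in n.  For example,
	 * rxbits == 0x5 (queues 2 and 0 ready) gives
	 * (0xffffaa50 >> 10) & 3 == 2, so queue 2 (GE_RXPRIO_MEDHI) is
	 * serviced and its bit cleared before queue 0 is looked at.
	 */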
	GE_FUNC_ENTER(sc, "gfe_rx_process");

	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		GE_DPRINTF(sc, ("%1x", rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;
		rxbits &= ~(1 << rxprio);
		rxq = &sc->sc_rxq[rxprio];
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi));
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = &sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
	GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (rxq->rxq_curpkt)
		m_freem(rxq->rxq_curpkt);
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
		gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "");
}

void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
	do {
		delay(10);
	} while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}

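/*
 * gfe_tick() is the deferred-work half of the interrupt handler:
 * gfe_rx_process() masks an overflowed rx queue's interrupt bits into
 * sc_idlemask and schedules this callout, which re-primes the stalled
 * ring(s) from the top, restores the saved interrupt bits, and then
 * polls gfe_intr() once for anything that arrived meanwhile.
 */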
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		intrmask |= sc->sc_idlemask;
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}

int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Is there anything in the pending queue to enqueue?  If not,
	 * punt; likewise if the txq has not been created yet.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?
	 * Do we have enough free descriptors?
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		volatile struct gt_eth_desc * const txd2 =
		    &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		cmdsts = gt32toh(txd2->ed_cmdsts);
		if (cmdsts & TX_CMD_O) {
			int nextin;
			/*
			 * Sometimes the Discovery forgets to update the
			 * last descriptor.  See if we own the descriptor
			 * after it (since we know we've turned that over
			 * to the Discovery and if we owned it, the
			 * Discovery gave it back).  If we do, we know the
			 * Discovery gave back this one but forgot to mark
			 * it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "@");
				return 0;
			}
#ifdef DEBUG
			printf("%s: txenqueue: transmitter resynced at %d\n",
			    device_xname(&sc->sc_dev), txq->txq_fi);
#endif
		}
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
		txq->txq_inptr += roundup(pktlen, dcache_line_size);
		txq->txq_nactive--;

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;
		GE_DPRINTF(sc, ("%%"));
	}

	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}
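	/*
	 * txq_outptr and txq_inptr are byte offsets into one contiguous
	 * staging buffer of GE_TXBUF_SIZE bytes: outptr is where the
	 * next packet is copied in, inptr is where the oldest in-flight
	 * packet began, and each packet is padded to a cache-line
	 * multiple so flushes never touch a neighbour's data.
	 */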
	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	m_copydata(m, 0, m->m_pkthdr.len,
	    (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * The packet data has already been copied into the staging
	 * buffer, so take the mbuf off the pending queue, give BPF a
	 * look at it, and free it.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
#endif
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}

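/*
 * gfe_tx_done() reclaims completed transmit descriptors: it walks
 * forward from txq_fi, stops at the first descriptor the GT still owns
 * (applying the same lost-ownership workaround as gfe_tx_enqueue()),
 * advances txq_inptr past the reclaimed bytes in the staging buffer,
 * and finally drops the TxEnd/TxBuffer bits from the interrupt mask
 * once the ring is empty.
 */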
uint32_t
gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	GE_FUNC_ENTER(sc, "gfe_tx_done");

	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return intrmask;
	}

	while (txq->txq_nactive > 0) {
		const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
		volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
			int nextin;

			if (txq->txq_nactive == 1) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the descriptor.  See if we own the
			 * descriptor after it (since we know we've turned
			 * that over to the Discovery and if we own it now
			 * then the Discovery gave it back).  If we do, we
			 * know the Discovery gave back this one but forgot
			 * to mark it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
#ifdef DEBUG
			printf("%s: txdone: transmitter resynced at %d\n",
			    device_xname(&sc->sc_dev), txq->txq_fi);
#endif
		}
#if 0
		GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
		    txq->txq_lo,
		    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
		    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
		GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
		    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
		txq->txq_inptr += roundup(pktlen, dcache_line_size);

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;

		/* txd->ed_bufptr = 0; */

		ifp->if_timer = 5;
		--txq->txq_nactive;
	}
	if (txq->txq_nactive != 0)
		panic("%s: transmit fifo%d empty but active count (%d) > 0!",
		    device_xname(&sc->sc_dev), txprio, txq->txq_nactive);
	ifp->if_timer = 0;
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
	intrmask &= ~(txq->txq_intrbits &
	    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

int
gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");

	error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
	    GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
	if (error) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}

int
gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc *txd;
	unsigned int i;
	bus_addr_t addr;

	GE_FUNC_ENTER(sc, "gfe_tx_start");

	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	if (sc->sc_flags & GE_NOFREE) {
		KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
		KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
	} else {
		int error = gfe_tx_txqalloc(sc, txprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	}

	txq->txq_descs =
	    (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
	txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;

	txq->txq_pendq.ifq_maxlen = 10;
	txq->txq_ei_gapcount = 0;
	txq->txq_nactive = 0;
	txq->txq_fi = 0;
	txq->txq_lo = 0;
	txq->txq_inptr = GE_TXBUF_SIZE;
	txq->txq_outptr = 0;
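	/*
	 * Chain the transmit descriptors into a ring: each ed_nxtptr
	 * holds the bus address of the following descriptor and the
	 * last one points back at the first.  A zeroed ed_cmdsts leaves
	 * each descriptor CPU-owned, which is what terminates list
	 * processing (see the note in gfe_tx_enqueue() about NULL
	 * nxtptr handling being broken).
	 */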
	for (i = 0, txd = txq->txq_descs,
	     addr = txq->txq_desc_busaddr + sizeof(*txd);
	     i < GE_TXDESC_MAX - 1;
	     i++, txd++, addr += sizeof(*txd)) {
		/*
		 * update the nxtptr to point to the next txd.
		 */
		txd->ed_cmdsts = 0;
		txd->ed_nxtptr = htogt32(addr);
	}
	txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
	    htogt32(txq->txq_desc_busaddr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
	    GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	switch (txprio) {
	case GE_TXPRIO_HI:
		txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
		txq->txq_epsrbits = ETH_EPSR_TxHigh;
		txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
		GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_LO:
		txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
		txq->txq_epsrbits = ETH_EPSR_TxLow;
		txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
		GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_NONE:
		break;
	}
#if 0
	GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
	gt_write(device_parent(&sc->sc_dev), txq->txq_ectdp,
	    txq->txq_desc_busaddr);
	GE_DPRINTF(sc, (")"));
#endif

	/*
	 * If we are restarting, there may be packets in the pending queue
	 * waiting to be enqueued.  Try enqueuing packets until the pending
	 * queue is empty or there is no room for them on the device.
	 */
	while (gfe_tx_enqueue(sc, txprio))
		continue;

	GE_FUNC_EXIT(sc, "");
	return 0;
}

void
gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
{
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (!flush) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &txq->txq_desc_mem);
		gfe_dmamem_free(sc, &txq->txq_buf_mem);
	}
	GE_FUNC_EXIT(sc, "-F");
}

void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}

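/*
 * The interrupt handler loops up to four times because servicing one
 * batch of events can let more arrive.  Each pass reads the cause
 * register, masks it to the interrupts currently enabled, and writes
 * ~cause back to EICR, which (the GT's cause bits apparently clear
 * when written as zero) acknowledges exactly the bits about to be
 * serviced.
 */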
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	for (cnt = 0; cnt < 4; cnt++) {
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#x", cause));
		if (cause == 0)
			break;

		claim = 1;

		GE_WRITE(sc, EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		if (cause & ETH_IR_MIIPhySTC) {
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}

int
gfe_mii_read (struct device *self, int phy, int reg)
{
	return gt_mii_read(self, device_parent(self), phy, reg);
}

void
gfe_mii_write (struct device *self, int phy, int reg, int value)
{
	gt_mii_write(self, device_parent(self), phy, reg, value);
}

void
gfe_mii_statchg (struct device *self)
{
	/* struct gfe_softc *sc = device_private(self); */
	/* do nothing? */
}

int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;
	}
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}

int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	add0 = ((uint32_t) eaddr[5] << 0) |
	    ((uint32_t) eaddr[4] << 8) |
	    ((uint32_t) eaddr[3] << 16);

	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);
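	/*
	 * Taken together, the three masked-shift pairs above (and the
	 * identical ones below for add1) reverse the bit order within
	 * each byte: first nibbles are swapped, then bit pairs within
	 * each nibble, then adjacent bits.  E.g. a byte of 0x12
	 * (00010010) becomes 0x48 (01001000).
	 */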
	add1 = ((uint32_t) eaddr[2] << 0) |
	    ((uint32_t) eaddr[1] << 8) |
	    ((uint32_t) eaddr[0] << 16);

	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15-bit hash entry address.
	 * ethernetADD is a 48-bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e. a MAC address
	 * of 0x123456789abc translates to an ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 * hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *	XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 * hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
	} else {
#define	TRIBITFLIP	073516240	/* yes, it's in octal */
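		/*
		 * TRIBITFLIP packs the bit-reverse of every 3-bit value
		 * into one octal constant: ((TRIBITFLIP >> (x * 3)) & 7)
		 * is x with its three bits reversed, i.e. 0,1,...,7 map
		 * to 0,4,2,6,1,5,3,7.
		 */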
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 * hashResult[08:00] = ethernetADD[06:14]
		 *	XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 * hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#x)", result));
	}
	GE_FUNC_EXIT(sc, "");
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}

int
gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
	enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint64_t he;
	uint64_t *maybe_he_p = NULL;
	int limit;
	int hash;
	int maybe_hash = 0;

	GE_FUNC_ENTER(sc, "gfe_hash_entry_op");

	hash = gfe_hash_compute(sc, eaddr);

	if (sc->sc_hashtable == NULL) {
		panic("%s:%d: hashtable == NULL!", device_xname(&sc->sc_dev),
		    __LINE__);
	}

	/*
	 * Assume we are going to insert so create the hash entry we
	 * are going to insert.  We also use it to match entries we
	 * will be removing.
	 */
	he = ((uint64_t) eaddr[5] << 43) |
	    ((uint64_t) eaddr[4] << 35) |
	    ((uint64_t) eaddr[3] << 27) |
	    ((uint64_t) eaddr[2] << 19) |
	    ((uint64_t) eaddr[1] << 11) |
	    ((uint64_t) eaddr[0] << 3) |
	    HSH_PRIO_INS(prio) | HSH_V | HSH_R;

	/*
	 * The GT will search up to 12 entries for a hit, so we must
	 * mimic that.
	 */
	hash &= sc->sc_hashmask / sizeof(he);
	for (limit = HSH_LIMIT; limit > 0 ; --limit) {
		/*
		 * Does the GT wrap at the end, stop at the end, or
		 * overrun the end?  Assume it wraps for now.  Stash a
		 * copy of the current hash entry.
		 */
		uint64_t *he_p = &sc->sc_hashtable[hash];
		uint64_t thishe = *he_p;

		/*
		 * If the hash entry isn't valid, that breaks the chain,
		 * and this entry is a good candidate for reuse.
		 */
		if ((thishe & HSH_V) == 0) {
			maybe_he_p = he_p;
			break;
		}

		/*
		 * If the hash entry has the same address we are looking
		 * for then ...  if we are removing and the skip bit is
		 * set, it's already been removed.  If we are adding and
		 * the skip bit is clear, then it's already added.  In
		 * either case return EBUSY indicating the op has already
		 * been done.  Otherwise flip the skip bit and return 0.
		 */
		if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
			if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
			    ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
				return EBUSY;
			*he_p = thishe ^ HSH_S;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
			    hash * sizeof(he), sizeof(he),
			    BUS_DMASYNC_PREWRITE);
			GE_FUNC_EXIT(sc, "^");
			return 0;
		}

		/*
		 * If we haven't found a slot for the entry and this entry
		 * is currently being skipped, return this entry.
		 */
		if (maybe_he_p == NULL && (thishe & HSH_S)) {
			maybe_he_p = he_p;
			maybe_hash = hash;
		}

		hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
	}

	/*
	 * If we got here, then there was no entry to remove.
	 */
	if (op == GE_HASH_REMOVE) {
		GE_FUNC_EXIT(sc, "?");
		return ENOENT;
	}

	/*
	 * If we couldn't find a slot, return an error.
	 */
	if (maybe_he_p == NULL) {
		GE_FUNC_EXIT(sc, "!");
		return ENOSPC;
	}

	/* Update the entry. */
	*maybe_he_p = he;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "+");
	return 0;
}

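/*
 * gfe_hash_multichg() keeps the hardware hash table in sync with one
 * multicast list change.  A range (addrlo != addrhi) cannot be
 * expressed as individual hash entries, so it forces promiscuous
 * multicast via the PM bit instead; returning ENETRESET tells the
 * caller that the whole filter state needs to be recomputed.
 */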
"remove" : "add", 1819 ether_sprintf(enm->enm_addrlo)); 1820 GE_FUNC_EXIT(sc, ""); 1821 return 0; 1822 } 1823 1824 if (error == ENOENT) { 1825 printf("%s: multichg: failed to remove %s: not in table\n", 1826 device_xname(&sc->sc_dev), 1827 ether_sprintf(enm->enm_addrlo)); 1828 GE_FUNC_EXIT(sc, ""); 1829 return 0; 1830 } 1831 1832 if (error == ENOSPC) { 1833 printf("%s: multichg: failed to add %s: no space; regenerating table\n", 1834 device_xname(&sc->sc_dev), 1835 ether_sprintf(enm->enm_addrlo)); 1836 GE_FUNC_EXIT(sc, ""); 1837 return ENETRESET; 1838 } 1839 GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n", 1840 device_xname(&sc->sc_dev), 1841 cmd == SIOCDELMULTI ? "remove" : "add", 1842 ether_sprintf(enm->enm_addrlo))); 1843 GE_FUNC_EXIT(sc, ""); 1844 return 0; 1845 } 1846 1847 int 1848 gfe_hash_fill(struct gfe_softc *sc) 1849 { 1850 struct ether_multistep step; 1851 struct ether_multi *enm; 1852 int error; 1853 1854 GE_FUNC_ENTER(sc, "gfe_hash_fill"); 1855 1856 error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, 1857 CLLADDR(sc->sc_ec.ec_if.if_sadl)); 1858 if (error) 1859 GE_FUNC_EXIT(sc, "!"); 1860 return error; 1861 1862 sc->sc_flags &= ~GE_ALLMULTI; 1863 if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) 1864 sc->sc_pcr &= ~ETH_EPCR_PM; 1865 ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); 1866 while (enm != NULL) { 1867 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1868 sc->sc_flags |= GE_ALLMULTI; 1869 sc->sc_pcr |= ETH_EPCR_PM; 1870 } else { 1871 error = gfe_hash_entry_op(sc, GE_HASH_ADD, 1872 GE_RXPRIO_MEDLO, enm->enm_addrlo); 1873 if (error == ENOSPC) 1874 break; 1875 } 1876 ETHER_NEXT_MULTI(step, enm); 1877 } 1878 1879 GE_FUNC_EXIT(sc, ""); 1880 return error; 1881 } 1882 1883 int 1884 gfe_hash_alloc(struct gfe_softc *sc) 1885 { 1886 int error; 1887 GE_FUNC_ENTER(sc, "gfe_hash_alloc"); 1888 sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; 1889 error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, 1890 BUS_DMA_NOCACHE); 1891 if (error) { 1892 printf("%s: failed to allocate %d bytes for hash table: %d\n", 1893 device_xname(&sc->sc_dev), sc->sc_hashmask + 1, error); 1894 GE_FUNC_EXIT(sc, ""); 1895 return error; 1896 } 1897 sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; 1898 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); 1899 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1900 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); 1901 GE_FUNC_EXIT(sc, ""); 1902 return 0; 1903 } 1904