1 /* $NetBSD: if_gfe.c,v 1.34 2010/01/19 22:06:59 pooka Exp $ */ 2 3 /* 4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed for the NetBSD Project by 18 * Allegro Networks, Inc., and Wasabi Systems, Inc. 19 * 4. The name of Allegro Networks, Inc. may not be used to endorse 20 * or promote products derived from this software without specific prior 21 * written permission. 22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND 27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * if_gfe.c -- GT ethernet MAC driver 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.34 2010/01/19 22:06:59 pooka Exp $"); 46 47 #include "opt_inet.h" 48 49 #include <sys/param.h> 50 #include <sys/types.h> 51 #include <sys/inttypes.h> 52 #include <sys/queue.h> 53 54 #include <uvm/uvm_extern.h> 55 56 #include <sys/callout.h> 57 #include <sys/device.h> 58 #include <sys/errno.h> 59 #include <sys/ioctl.h> 60 #include <sys/mbuf.h> 61 #include <sys/socket.h> 62 63 #include <sys/bus.h> 64 65 #include <net/if.h> 66 #include <net/if_dl.h> 67 #include <net/if_ether.h> 68 #include <net/if_media.h> 69 70 #ifdef INET 71 #include <netinet/in.h> 72 #include <netinet/if_inarp.h> 73 #endif 74 #include <net/bpf.h> 75 76 #include <dev/mii/miivar.h> 77 78 #include <dev/marvell/gtintrreg.h> 79 #include <dev/marvell/gtethreg.h> 80 81 #include <dev/marvell/gtvar.h> 82 #include <dev/marvell/if_gfevar.h> 83 84 #define GE_READ(sc, reg) \ 85 bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg) 86 #define GE_WRITE(sc, reg, v) \ 87 bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v)) 88 89 #define GE_DEBUG 90 #if 0 91 #define GE_NOHASH 92 #define GE_NORX 93 #endif 94 95 #ifdef GE_DEBUG 96 #define GE_DPRINTF(sc, a) do \ 97 if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \ 98 printf a; \ 99 while (0) 100 #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func)) 101 #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]")) 102 #else 103 #define GE_DPRINTF(sc, a) do { } while (0) 104 #define GE_FUNC_ENTER(sc, func) do { } while (0) 105 #define GE_FUNC_EXIT(sc, str) do { } while (0) 106 #endif 107 enum gfe_whack_op { 108 GE_WHACK_START, GE_WHACK_RESTART, 109 GE_WHACK_CHANGE, GE_WHACK_STOP 110 }; 111 112 enum gfe_hash_op { 113 GE_HASH_ADD, GE_HASH_REMOVE, 114 }; 115 116 #if 1 117 #define htogt32(a) htobe32(a) 118 #define gt32toh(a) be32toh(a) 119 #else 120 #define htogt32(a) htole32(a) 121 #define gt32toh(a) le32toh(a) 122 #endif 123 124 #define GE_RXDSYNC(sc, rxq, n, ops) \ 125 bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \ 126 (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \ 127 (ops)) 128 #define GE_RXDPRESYNC(sc, rxq, n) \ 129 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 130 #define GE_RXDPOSTSYNC(sc, rxq, n) \ 131 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 132 133 #define GE_TXDSYNC(sc, txq, n, ops) \ 134 bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \ 135 (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \ 136 (ops)) 137 #define GE_TXDPRESYNC(sc, txq, n) \ 138 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 139 #define GE_TXDPOSTSYNC(sc, txq, n) \ 140 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 141 142 #define STATIC 143 144 STATIC int gfe_match (device_t, cfdata_t, void *); 145 STATIC void gfe_attach (device_t, device_t, void *); 146 147 STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, 148 size_t, int); 149 STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); 150 151 STATIC int gfe_ifioctl (struct ifnet *, u_long, void *); 152 STATIC void gfe_ifstart (struct ifnet *); 153 STATIC void gfe_ifwatchdog (struct ifnet *); 154 155 STATIC int gfe_mii_read (device_t, int, int); 156 STATIC void gfe_mii_write (device_t, int, int, int); 157 STATIC void gfe_mii_statchg (device_t); 158 159 STATIC void gfe_tick(void *arg); 160 161 STATIC void gfe_tx_restart(void *); 162 STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); 163 STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); 164 STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); 165 STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio); 166 STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); 167 STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); 168 169 STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); 170 STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); 171 STATIC int gfe_rx_prime(struct gfe_softc *); 172 STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); 173 STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); 174 STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio); 175 STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); 176 177 STATIC int gfe_intr(void *); 178 179 STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); 180 181 STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); 182 STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, 183 enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); 184 STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, 185 u_long); 186 STATIC int gfe_hash_fill(struct gfe_softc *); 187 STATIC int gfe_hash_alloc(struct gfe_softc *); 188 189 /* Linkup to the rest of the kernel */ 190 CFATTACH_DECL(gfe, sizeof(struct gfe_softc), 191 gfe_match, gfe_attach, NULL, NULL); 192 193 extern struct cfdriver gfe_cd; 194 195 int 196 gfe_match(device_t parent, cfdata_t cf, void *aux) 197 { 198 struct gt_softc *gt = (struct gt_softc *) parent; 199 struct gt_attach_args *ga = aux; 200 uint8_t enaddr[6]; 201 202 if (!GT_ETHEROK(gt, ga, &gfe_cd)) 203 return 0; 204 205 if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0) 206 return 0; 207 208 if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 && 209 enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0) 210 return 0; 211 212 return 1; 213 } 214 215 /* 216 * Attach this instance, and then all the sub-devices 217 */ 218 void 219 gfe_attach(device_t parent, device_t self, void *aux) 220 { 221 struct gt_attach_args * const ga = aux; 222 struct gt_softc * const gt = device_private(parent); 223 struct gfe_softc * const sc = device_private(self); 224 struct ifnet * const ifp = &sc->sc_ec.ec_if; 225 uint32_t data; 226 uint8_t enaddr[6]; 227 int phyaddr; 228 uint32_t sdcr; 229 int error; 230 231 GT_ETHERFOUND(gt, ga); 232 233 sc->sc_gt_memt = ga->ga_memt; 234 sc->sc_gt_memh = ga->ga_memh; 235 sc->sc_dmat = ga->ga_dmat; 236 sc->sc_macno = ga->ga_unit; 237 238 if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh, 239 ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) { 240 aprint_error(": failed to map registers\n"); 241 } 242 243 callout_init(&sc->sc_co, 0); 244 245 data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR); 246 phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno); 247 248 gtget_macaddr(gt, sc->sc_macno, enaddr); 249 250 sc->sc_pcr = GE_READ(sc, EPCR); 251 sc->sc_pcxr = GE_READ(sc, EPCXR); 252 sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC; 253 254 aprint_normal(": address %s", ether_sprintf(enaddr)); 255 256 #if defined(DEBUG) 257 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr); 258 #endif 259 260 sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override; 261 if (device_cfdata(&sc->sc_dev)->cf_flags & 1) { 262 aprint_normal(", phy %d (rmii)", phyaddr); 263 sc->sc_pcxr |= ETH_EPCXR_RMIIEn; 264 } else { 265 aprint_normal(", phy %d (mii)", phyaddr); 266 sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn; 267 } 268 if (device_cfdata(&sc->sc_dev)->cf_flags & 2) 269 sc->sc_flags |= GE_NOFREE; 270 sc->sc_pcxr &= ~(3 << 14); 271 sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14); 272 273 if (sc->sc_pcr & ETH_EPCR_EN) { 274 int tries = 1000; 275 /* 276 * Abort transmitter and receiver and wait for them to quiese 277 */ 278 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT); 279 do { 280 delay(100); 281 } while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT))); 282 } 283 284 sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF); 285 286 #if defined(DEBUG) 287 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr); 288 #endif 289 290 /* 291 * Now turn off the GT. If it didn't quiese, too ***ing bad. 292 */ 293 GE_WRITE(sc, EPCR, sc->sc_pcr); 294 GE_WRITE(sc, EIMR, sc->sc_intrmask); 295 sdcr = GE_READ(sc, ESDCR); 296 ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4); 297 sdcr |= ETH_ESDCR_RIFB; 298 GE_WRITE(sc, ESDCR, sdcr); 299 sc->sc_max_frame_length = 1536; 300 301 aprint_normal("\n"); 302 sc->sc_mii.mii_ifp = ifp; 303 sc->sc_mii.mii_readreg = gfe_mii_read; 304 sc->sc_mii.mii_writereg = gfe_mii_write; 305 sc->sc_mii.mii_statchg = gfe_mii_statchg; 306 307 sc->sc_ec.ec_mii = &sc->sc_mii; 308 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 309 ether_mediastatus); 310 311 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr, 312 MII_OFFSET_ANY, MIIF_NOISOLATE); 313 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 314 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 315 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 316 } else { 317 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 318 } 319 320 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ); 321 ifp->if_softc = sc; 322 /* ifp->if_mowner = &sc->sc_mowner; */ 323 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 324 #if 0 325 ifp->if_flags |= IFF_DEBUG; 326 #endif 327 ifp->if_ioctl = gfe_ifioctl; 328 ifp->if_start = gfe_ifstart; 329 ifp->if_watchdog = gfe_ifwatchdog; 330 331 if (sc->sc_flags & GE_NOFREE) { 332 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI); 333 if (!error) 334 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI); 335 if (!error) 336 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO); 337 if (!error) 338 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO); 339 if (!error) 340 error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI); 341 if (!error) 342 error = gfe_hash_alloc(sc); 343 if (error) 344 aprint_error( 345 "%s: failed to allocate resources: %d\n", 346 ifp->if_xname, error); 347 } 348 349 if_attach(ifp); 350 ether_ifattach(ifp, enaddr); 351 bpf_ops->bpf_attach(ifp, DLT_EN10MB, 352 sizeof(struct ether_header), &ifp->if_bpf); 353 #if NRND > 0 354 rnd_attach_source(&sc->sc_rnd_source, device_xname(self), RND_TYPE_NET, 0); 355 #endif 356 intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET, 357 gfe_intr, sc); 358 } 359 360 int 361 gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, 362 size_t size, int flags) 363 { 364 int error = 0; 365 GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); 366 367 KASSERT(gdm->gdm_kva == NULL); 368 gdm->gdm_size = size; 369 gdm->gdm_maxsegs = maxsegs; 370 371 error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, 372 gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, 373 BUS_DMA_NOWAIT); 374 if (error) 375 goto fail; 376 377 error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, 378 gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); 379 if (error) 380 goto fail; 381 382 error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, 383 gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map); 384 if (error) 385 goto fail; 386 387 error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, 388 gdm->gdm_size, NULL, BUS_DMA_NOWAIT); 389 if (error) 390 goto fail; 391 392 /* invalidate from cache */ 393 bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, 394 BUS_DMASYNC_PREREAD); 395 fail: 396 if (error) { 397 gfe_dmamem_free(sc, gdm); 398 GE_DPRINTF(sc, (":err=%d", error)); 399 } 400 GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x", 401 gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, 402 gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); 403 GE_FUNC_EXIT(sc, ""); 404 return error; 405 } 406 407 void 408 gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm) 409 { 410 GE_FUNC_ENTER(sc, "gfe_dmamem_free"); 411 if (gdm->gdm_map) 412 bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map); 413 if (gdm->gdm_kva) 414 bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size); 415 if (gdm->gdm_nsegs > 0) 416 bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs); 417 gdm->gdm_map = NULL; 418 gdm->gdm_kva = NULL; 419 gdm->gdm_nsegs = 0; 420 GE_FUNC_EXIT(sc, ""); 421 } 422 423 int 424 gfe_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 425 { 426 struct gfe_softc * const sc = ifp->if_softc; 427 struct ifreq *ifr = (struct ifreq *) data; 428 struct ifaddr *ifa = (struct ifaddr *) data; 429 int s, error = 0; 430 431 GE_FUNC_ENTER(sc, "gfe_ifioctl"); 432 s = splnet(); 433 434 switch (cmd) { 435 case SIOCINITIFADDR: 436 ifp->if_flags |= IFF_UP; 437 error = gfe_whack(sc, GE_WHACK_START); 438 switch (ifa->ifa_addr->sa_family) { 439 #ifdef INET 440 case AF_INET: 441 if (error == 0) 442 arp_ifinit(ifp, ifa); 443 break; 444 #endif 445 default: 446 break; 447 } 448 break; 449 450 case SIOCSIFFLAGS: 451 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 452 break; 453 /* XXX re-use ether_ioctl() */ 454 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { 455 case IFF_UP|IFF_RUNNING:/* active->active, update */ 456 error = gfe_whack(sc, GE_WHACK_CHANGE); 457 break; 458 case IFF_RUNNING: /* not up, so we stop */ 459 error = gfe_whack(sc, GE_WHACK_STOP); 460 break; 461 case IFF_UP: /* not running, so we start */ 462 error = gfe_whack(sc, GE_WHACK_START); 463 break; 464 case 0: /* idle->idle: do nothing */ 465 break; 466 } 467 break; 468 469 case SIOCSIFMEDIA: 470 case SIOCGIFMEDIA: 471 case SIOCADDMULTI: 472 case SIOCDELMULTI: 473 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 474 if (ifp->if_flags & IFF_RUNNING) 475 error = gfe_whack(sc, GE_WHACK_CHANGE); 476 else 477 error = 0; 478 } 479 break; 480 481 case SIOCSIFMTU: 482 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { 483 error = EINVAL; 484 break; 485 } 486 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) 487 error = 0; 488 break; 489 490 default: 491 error = ether_ioctl(ifp, cmd, data); 492 break; 493 } 494 splx(s); 495 GE_FUNC_EXIT(sc, ""); 496 return error; 497 } 498 499 void 500 gfe_ifstart(struct ifnet *ifp) 501 { 502 struct gfe_softc * const sc = ifp->if_softc; 503 struct mbuf *m; 504 505 GE_FUNC_ENTER(sc, "gfe_ifstart"); 506 507 if ((ifp->if_flags & IFF_RUNNING) == 0) { 508 GE_FUNC_EXIT(sc, "$"); 509 return; 510 } 511 512 for (;;) { 513 IF_DEQUEUE(&ifp->if_snd, m); 514 if (m == NULL) { 515 ifp->if_flags &= ~IFF_OACTIVE; 516 GE_FUNC_EXIT(sc, ""); 517 return; 518 } 519 520 /* 521 * No space in the pending queue? try later. 522 */ 523 if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq)) 524 break; 525 526 /* 527 * Try to enqueue a mbuf to the device. If that fails, we 528 * can always try to map the next mbuf. 529 */ 530 IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m); 531 GE_DPRINTF(sc, (">")); 532 #ifndef GE_NOTX 533 (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI); 534 #endif 535 } 536 537 /* 538 * Attempt to queue the mbuf for send failed. 539 */ 540 IF_PREPEND(&ifp->if_snd, m); 541 ifp->if_flags |= IFF_OACTIVE; 542 GE_FUNC_EXIT(sc, "%%"); 543 } 544 545 void 546 gfe_ifwatchdog(struct ifnet *ifp) 547 { 548 struct gfe_softc * const sc = ifp->if_softc; 549 struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI]; 550 551 GE_FUNC_ENTER(sc, "gfe_ifwatchdog"); 552 printf("%s: device timeout", device_xname(&sc->sc_dev)); 553 if (ifp->if_flags & IFF_RUNNING) { 554 uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]); 555 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 556 GE_TXDPOSTSYNC(sc, txq, curtxdnum); 557 printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ", 558 txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts, 559 txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts, 560 GE_READ(sc, EICR)); 561 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 562 GE_TXDPRESYNC(sc, txq, curtxdnum); 563 } 564 printf("\n"); 565 ifp->if_oerrors++; 566 (void) gfe_whack(sc, GE_WHACK_RESTART); 567 GE_FUNC_EXIT(sc, ""); 568 } 569 570 int 571 gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio) 572 { 573 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 574 int error; 575 576 GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc"); 577 GE_DPRINTF(sc, ("(%d)", rxprio)); 578 579 error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1, 580 GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE); 581 if (error) { 582 GE_FUNC_EXIT(sc, "!!"); 583 return error; 584 } 585 586 error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS, 587 GE_RXBUF_MEMSIZE, 0); 588 if (error) { 589 GE_FUNC_EXIT(sc, "!!!"); 590 return error; 591 } 592 GE_FUNC_EXIT(sc, ""); 593 return error; 594 } 595 596 int 597 gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio) 598 { 599 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 600 volatile struct gt_eth_desc *rxd; 601 const bus_dma_segment_t *ds; 602 int idx; 603 bus_addr_t nxtaddr; 604 bus_size_t boff; 605 606 GE_FUNC_ENTER(sc, "gfe_rx_rxqinit"); 607 GE_DPRINTF(sc, ("(%d)", rxprio)); 608 609 if ((sc->sc_flags & GE_NOFREE) == 0) { 610 int error = gfe_rx_rxqalloc(sc, rxprio); 611 if (error) { 612 GE_FUNC_EXIT(sc, "!"); 613 return error; 614 } 615 } else { 616 KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL); 617 KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL); 618 } 619 620 memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE); 621 622 rxq->rxq_descs = 623 (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva; 624 rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr; 625 rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva; 626 rxq->rxq_fi = 0; 627 rxq->rxq_active = GE_RXDESC_MAX; 628 for (idx = 0, rxd = rxq->rxq_descs, 629 boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs, 630 nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd); 631 idx < GE_RXDESC_MAX; 632 idx++, rxd++, nxtaddr += sizeof(*rxd)) { 633 rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16); 634 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 635 rxd->ed_bufptr = htogt32(ds->ds_addr + boff); 636 /* 637 * update the nxtptr to point to the next txd. 638 */ 639 if (idx == GE_RXDESC_MAX - 1) 640 nxtaddr = rxq->rxq_desc_busaddr; 641 rxd->ed_nxtptr = htogt32(nxtaddr); 642 boff += GE_RXBUF_SIZE; 643 if (boff == ds->ds_len) { 644 ds++; 645 boff = 0; 646 } 647 } 648 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0, 649 rxq->rxq_desc_mem.gdm_map->dm_mapsize, 650 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 651 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0, 652 rxq->rxq_buf_mem.gdm_map->dm_mapsize, 653 BUS_DMASYNC_PREREAD); 654 655 rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError; 656 switch (rxprio) { 657 case GE_RXPRIO_HI: 658 rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3; 659 rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno); 660 rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno); 661 break; 662 case GE_RXPRIO_MEDHI: 663 rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2; 664 rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno); 665 rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno); 666 break; 667 case GE_RXPRIO_MEDLO: 668 rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1; 669 rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno); 670 rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno); 671 break; 672 case GE_RXPRIO_LO: 673 rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0; 674 rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno); 675 rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno); 676 break; 677 } 678 GE_FUNC_EXIT(sc, ""); 679 return 0; 680 } 681 682 void 683 gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio) 684 { 685 struct ifnet * const ifp = &sc->sc_ec.ec_if; 686 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 687 struct mbuf *m = rxq->rxq_curpkt; 688 689 GE_FUNC_ENTER(sc, "gfe_rx_get"); 690 GE_DPRINTF(sc, ("(%d)", rxprio)); 691 692 while (rxq->rxq_active > 0) { 693 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi]; 694 struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi]; 695 const struct ether_header *eh; 696 unsigned int cmdsts; 697 size_t buflen; 698 699 GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi); 700 cmdsts = gt32toh(rxd->ed_cmdsts); 701 GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts)); 702 rxq->rxq_cmdsts = cmdsts; 703 /* 704 * Sometimes the GE "forgets" to reset the ownership bit. 705 * But if the length has been rewritten, the packet is ours 706 * so pretend the O bit is set. 707 */ 708 buflen = gt32toh(rxd->ed_lencnt) & 0xffff; 709 if ((cmdsts & RX_CMD_O) && buflen == 0) { 710 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 711 break; 712 } 713 714 /* 715 * If this is not a single buffer packet with no errors 716 * or for some reason it's bigger than our frame size, 717 * ignore it and go to the next packet. 718 */ 719 if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) != 720 (RX_CMD_F|RX_CMD_L) || 721 buflen > sc->sc_max_frame_length) { 722 GE_DPRINTF(sc, ("!")); 723 --rxq->rxq_active; 724 ifp->if_ipackets++; 725 ifp->if_ierrors++; 726 goto give_it_back; 727 } 728 729 /* CRC is included with the packet; trim it off. */ 730 buflen -= ETHER_CRC_LEN; 731 732 if (m == NULL) { 733 MGETHDR(m, M_DONTWAIT, MT_DATA); 734 if (m == NULL) { 735 GE_DPRINTF(sc, ("?")); 736 break; 737 } 738 } 739 if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) { 740 MCLGET(m, M_DONTWAIT); 741 if ((m->m_flags & M_EXT) == 0) { 742 GE_DPRINTF(sc, ("?")); 743 break; 744 } 745 } 746 m->m_data += 2; 747 m->m_len = 0; 748 m->m_pkthdr.len = 0; 749 m->m_pkthdr.rcvif = ifp; 750 rxq->rxq_cmdsts = cmdsts; 751 --rxq->rxq_active; 752 753 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 754 rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD); 755 756 KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0); 757 memcpy(m->m_data + m->m_len, rxb->rxb_data, buflen); 758 m->m_len = buflen; 759 m->m_pkthdr.len = buflen; 760 761 ifp->if_ipackets++; 762 if (ifp->if_bpf != NULL) 763 bpf_ops->bpf_mtap(ifp->if_bpf, m); 764 765 eh = (const struct ether_header *) m->m_data; 766 if ((ifp->if_flags & IFF_PROMISC) || 767 (rxq->rxq_cmdsts & RX_STS_M) == 0 || 768 (rxq->rxq_cmdsts & RX_STS_HE) || 769 (eh->ether_dhost[0] & 1) != 0 || 770 memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl), 771 ETHER_ADDR_LEN) == 0) { 772 (*ifp->if_input)(ifp, m); 773 m = NULL; 774 GE_DPRINTF(sc, (">")); 775 } else { 776 m->m_len = 0; 777 m->m_pkthdr.len = 0; 778 GE_DPRINTF(sc, ("+")); 779 } 780 rxq->rxq_cmdsts = 0; 781 782 give_it_back: 783 rxd->ed_lencnt &= ~0xffff; /* zero out length */ 784 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 785 #if 0 786 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", 787 rxq->rxq_fi, 788 ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1], 789 ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3])); 790 #endif 791 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 792 if (++rxq->rxq_fi == GE_RXDESC_MAX) 793 rxq->rxq_fi = 0; 794 rxq->rxq_active++; 795 } 796 rxq->rxq_curpkt = m; 797 GE_FUNC_EXIT(sc, ""); 798 } 799 800 uint32_t 801 gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask) 802 { 803 struct ifnet * const ifp = &sc->sc_ec.ec_if; 804 struct gfe_rxqueue *rxq; 805 uint32_t rxbits; 806 #define RXPRIO_DECODER 0xffffaa50 807 GE_FUNC_ENTER(sc, "gfe_rx_process"); 808 809 rxbits = ETH_IR_RxBuffer_GET(cause); 810 while (rxbits) { 811 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 812 GE_DPRINTF(sc, ("%1x", rxbits)); 813 rxbits &= ~(1 << rxprio); 814 gfe_rx_get(sc, rxprio); 815 } 816 817 rxbits = ETH_IR_RxError_GET(cause); 818 while (rxbits) { 819 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 820 uint32_t masks[(GE_RXDESC_MAX + 31) / 32]; 821 int idx; 822 rxbits &= ~(1 << rxprio); 823 rxq = &sc->sc_rxq[rxprio]; 824 sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits); 825 intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits); 826 if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) { 827 sc->sc_tickflags |= GE_TICK_RX_RESTART; 828 callout_reset(&sc->sc_co, 1, gfe_tick, sc); 829 } 830 ifp->if_ierrors++; 831 GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n", 832 device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi)); 833 memset(masks, 0, sizeof(masks)); 834 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 835 0, rxq->rxq_desc_mem.gdm_size, 836 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 837 for (idx = 0; idx < GE_RXDESC_MAX; idx++) { 838 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx]; 839 840 if (RX_CMD_O & gt32toh(rxd->ed_cmdsts)) 841 masks[idx/32] |= 1 << (idx & 31); 842 } 843 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 844 0, rxq->rxq_desc_mem.gdm_size, 845 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 846 #if defined(DEBUG) 847 printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n", 848 device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi, 849 rxq->rxq_cmdsts, masks[0], masks[1]); 850 #endif 851 } 852 if ((intrmask & ETH_IR_RxBits) == 0) 853 intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError); 854 855 GE_FUNC_EXIT(sc, ""); 856 return intrmask; 857 } 858 859 int 860 gfe_rx_prime(struct gfe_softc *sc) 861 { 862 struct gfe_rxqueue *rxq; 863 int error; 864 865 GE_FUNC_ENTER(sc, "gfe_rx_prime"); 866 867 error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI); 868 if (error) 869 goto bail; 870 rxq = &sc->sc_rxq[GE_RXPRIO_HI]; 871 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 872 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr); 873 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr); 874 } 875 sc->sc_intrmask |= rxq->rxq_intrbits; 876 877 error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI); 878 if (error) 879 goto bail; 880 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 881 rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI]; 882 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr); 883 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr); 884 sc->sc_intrmask |= rxq->rxq_intrbits; 885 } 886 887 error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO); 888 if (error) 889 goto bail; 890 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 891 rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO]; 892 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr); 893 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr); 894 sc->sc_intrmask |= rxq->rxq_intrbits; 895 } 896 897 error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO); 898 if (error) 899 goto bail; 900 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 901 rxq = &sc->sc_rxq[GE_RXPRIO_LO]; 902 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr); 903 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr); 904 sc->sc_intrmask |= rxq->rxq_intrbits; 905 } 906 907 bail: 908 GE_FUNC_EXIT(sc, ""); 909 return error; 910 } 911 912 void 913 gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio) 914 { 915 struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio]; 916 GE_FUNC_ENTER(sc, "gfe_rx_cleanup"); 917 if (rxq == NULL) { 918 GE_FUNC_EXIT(sc, ""); 919 return; 920 } 921 922 if (rxq->rxq_curpkt) 923 m_freem(rxq->rxq_curpkt); 924 if ((sc->sc_flags & GE_NOFREE) == 0) { 925 gfe_dmamem_free(sc, &rxq->rxq_desc_mem); 926 gfe_dmamem_free(sc, &rxq->rxq_buf_mem); 927 } 928 GE_FUNC_EXIT(sc, ""); 929 } 930 931 void 932 gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 933 { 934 GE_FUNC_ENTER(sc, "gfe_rx_stop"); 935 sc->sc_flags &= ~GE_RXACTIVE; 936 sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError); 937 sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError); 938 GE_WRITE(sc, EIMR, sc->sc_intrmask); 939 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR); 940 do { 941 delay(10); 942 } while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR); 943 gfe_rx_cleanup(sc, GE_RXPRIO_HI); 944 gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI); 945 gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO); 946 gfe_rx_cleanup(sc, GE_RXPRIO_LO); 947 GE_FUNC_EXIT(sc, ""); 948 } 949 950 void 951 gfe_tick(void *arg) 952 { 953 struct gfe_softc * const sc = arg; 954 uint32_t intrmask; 955 unsigned int tickflags; 956 int s; 957 958 GE_FUNC_ENTER(sc, "gfe_tick"); 959 960 s = splnet(); 961 962 tickflags = sc->sc_tickflags; 963 sc->sc_tickflags = 0; 964 intrmask = sc->sc_intrmask; 965 if (tickflags & GE_TICK_TX_IFSTART) 966 gfe_ifstart(&sc->sc_ec.ec_if); 967 if (tickflags & GE_TICK_RX_RESTART) { 968 intrmask |= sc->sc_idlemask; 969 if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) { 970 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI]; 971 rxq->rxq_fi = 0; 972 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr); 973 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr); 974 } 975 if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) { 976 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI]; 977 rxq->rxq_fi = 0; 978 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr); 979 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr); 980 } 981 if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) { 982 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO]; 983 rxq->rxq_fi = 0; 984 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr); 985 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr); 986 } 987 if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) { 988 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO]; 989 rxq->rxq_fi = 0; 990 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr); 991 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr); 992 } 993 sc->sc_idlemask = 0; 994 } 995 if (intrmask != sc->sc_intrmask) { 996 sc->sc_intrmask = intrmask; 997 GE_WRITE(sc, EIMR, sc->sc_intrmask); 998 } 999 gfe_intr(sc); 1000 splx(s); 1001 1002 GE_FUNC_EXIT(sc, ""); 1003 } 1004 1005 int 1006 gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio) 1007 { 1008 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 1009 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1010 struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1011 volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo]; 1012 uint32_t intrmask = sc->sc_intrmask; 1013 size_t buflen; 1014 struct mbuf *m; 1015 1016 GE_FUNC_ENTER(sc, "gfe_tx_enqueue"); 1017 1018 /* 1019 * Anything in the pending queue to enqueue? if not, punt. Likewise 1020 * if the txq is not yet created. 1021 * otherwise grab its dmamap. 1022 */ 1023 if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) { 1024 GE_FUNC_EXIT(sc, "-"); 1025 return 0; 1026 } 1027 1028 /* 1029 * Have we [over]consumed our limit of descriptors? 1030 * Do we have enough free descriptors? 1031 */ 1032 if (GE_TXDESC_MAX == txq->txq_nactive + 2) { 1033 volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi]; 1034 uint32_t cmdsts; 1035 size_t pktlen; 1036 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1037 cmdsts = gt32toh(txd2->ed_cmdsts); 1038 if (cmdsts & TX_CMD_O) { 1039 int nextin; 1040 /* 1041 * Sometime the Discovery forgets to update the 1042 * last descriptor. See if we own the descriptor 1043 * after it (since we know we've turned that to 1044 * the discovery and if we owned it, the Discovery 1045 * gave it back). If we do, we know the Discovery 1046 * gave back this one but forgot to mark it as ours. 1047 */ 1048 nextin = txq->txq_fi + 1; 1049 if (nextin == GE_TXDESC_MAX) 1050 nextin = 0; 1051 GE_TXDPOSTSYNC(sc, txq, nextin); 1052 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1053 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1054 GE_TXDPRESYNC(sc, txq, nextin); 1055 GE_FUNC_EXIT(sc, "@"); 1056 return 0; 1057 } 1058 #ifdef DEBUG 1059 printf("%s: txenqueue: transmitter resynced at %d\n", 1060 device_xname(&sc->sc_dev), txq->txq_fi); 1061 #endif 1062 } 1063 if (++txq->txq_fi == GE_TXDESC_MAX) 1064 txq->txq_fi = 0; 1065 txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr; 1066 pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff; 1067 txq->txq_inptr += roundup(pktlen, dcache_line_size); 1068 txq->txq_nactive--; 1069 1070 /* statistics */ 1071 ifp->if_opackets++; 1072 if (cmdsts & TX_STS_ES) 1073 ifp->if_oerrors++; 1074 GE_DPRINTF(sc, ("%%")); 1075 } 1076 1077 buflen = roundup(m->m_pkthdr.len, dcache_line_size); 1078 1079 /* 1080 * If this packet would wrap around the end of the buffer, reset back 1081 * to the beginning. 1082 */ 1083 if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) { 1084 txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr; 1085 txq->txq_outptr = 0; 1086 } 1087 1088 /* 1089 * Make sure the output packet doesn't run over the beginning of 1090 * what we've already given the GT. 1091 */ 1092 if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr && 1093 txq->txq_outptr + buflen > txq->txq_inptr) { 1094 intrmask |= txq->txq_intrbits & 1095 (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow); 1096 if (sc->sc_intrmask != intrmask) { 1097 sc->sc_intrmask = intrmask; 1098 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1099 } 1100 GE_FUNC_EXIT(sc, "#"); 1101 return 0; 1102 } 1103 1104 /* 1105 * The end-of-list descriptor we put on last time is the starting point 1106 * for this packet. The GT is supposed to terminate list processing on 1107 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor 1108 * must terminate the list. 1109 */ 1110 intrmask = sc->sc_intrmask; 1111 1112 m_copydata(m, 0, m->m_pkthdr.len, 1113 (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr); 1114 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1115 txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE); 1116 txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr); 1117 txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16); 1118 GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1119 1120 /* 1121 * Request a buffer interrupt every 2/3 of the way thru the transmit 1122 * buffer. 1123 */ 1124 txq->txq_ei_gapcount += buflen; 1125 if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) { 1126 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI); 1127 txq->txq_ei_gapcount = 0; 1128 } else { 1129 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST); 1130 } 1131 #if 0 1132 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo, 1133 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1134 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1135 #endif 1136 GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1137 1138 txq->txq_outptr += buflen; 1139 /* 1140 * Tell the SDMA engine to "Fetch!" 1141 */ 1142 GE_WRITE(sc, ESDCMR, 1143 txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL)); 1144 1145 GE_DPRINTF(sc, ("(%d)", txq->txq_lo)); 1146 1147 /* 1148 * Update the last out appropriately. 1149 */ 1150 txq->txq_nactive++; 1151 if (++txq->txq_lo == GE_TXDESC_MAX) 1152 txq->txq_lo = 0; 1153 1154 /* 1155 * Move mbuf from the pending queue to the snd queue. 1156 */ 1157 IF_DEQUEUE(&txq->txq_pendq, m); 1158 if (ifp->if_bpf != NULL) 1159 bpf_ops->bpf_mtap(ifp->if_bpf, m); 1160 m_freem(m); 1161 ifp->if_flags &= ~IFF_OACTIVE; 1162 1163 /* 1164 * Since we have put an item into the packet queue, we now want 1165 * an interrupt when the transmit queue finishes processing the 1166 * list. But only update the mask if needs changing. 1167 */ 1168 intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow); 1169 if (sc->sc_intrmask != intrmask) { 1170 sc->sc_intrmask = intrmask; 1171 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1172 } 1173 if (ifp->if_timer == 0) 1174 ifp->if_timer = 5; 1175 GE_FUNC_EXIT(sc, "*"); 1176 return 1; 1177 } 1178 1179 uint32_t 1180 gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask) 1181 { 1182 struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1183 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1184 1185 GE_FUNC_ENTER(sc, "gfe_tx_done"); 1186 1187 if (txq == NULL) { 1188 GE_FUNC_EXIT(sc, ""); 1189 return intrmask; 1190 } 1191 1192 while (txq->txq_nactive > 0) { 1193 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 1194 volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi]; 1195 uint32_t cmdsts; 1196 size_t pktlen; 1197 1198 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1199 if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) { 1200 int nextin; 1201 1202 if (txq->txq_nactive == 1) { 1203 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1204 GE_FUNC_EXIT(sc, ""); 1205 return intrmask; 1206 } 1207 /* 1208 * Sometimes the Discovery forgets to update the 1209 * ownership bit in the descriptor. See if we own the 1210 * descriptor after it (since we know we've turned 1211 * that to the Discovery and if we own it now then the 1212 * Discovery gave it back). If we do, we know the 1213 * Discovery gave back this one but forgot to mark it 1214 * as ours. 1215 */ 1216 nextin = txq->txq_fi + 1; 1217 if (nextin == GE_TXDESC_MAX) 1218 nextin = 0; 1219 GE_TXDPOSTSYNC(sc, txq, nextin); 1220 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1221 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1222 GE_TXDPRESYNC(sc, txq, nextin); 1223 GE_FUNC_EXIT(sc, ""); 1224 return intrmask; 1225 } 1226 #ifdef DEBUG 1227 printf("%s: txdone: transmitter resynced at %d\n", 1228 device_xname(&sc->sc_dev), txq->txq_fi); 1229 #endif 1230 } 1231 #if 0 1232 GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)", 1233 txq->txq_lo, 1234 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1235 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1236 #endif 1237 GE_DPRINTF(sc, ("(%d)", txq->txq_fi)); 1238 if (++txq->txq_fi == GE_TXDESC_MAX) 1239 txq->txq_fi = 0; 1240 txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr; 1241 pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff; 1242 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1243 txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE); 1244 txq->txq_inptr += roundup(pktlen, dcache_line_size); 1245 1246 /* statistics */ 1247 ifp->if_opackets++; 1248 if (cmdsts & TX_STS_ES) 1249 ifp->if_oerrors++; 1250 1251 /* txd->ed_bufptr = 0; */ 1252 1253 ifp->if_timer = 5; 1254 --txq->txq_nactive; 1255 } 1256 if (txq->txq_nactive != 0) 1257 panic("%s: transmit fifo%d empty but active count (%d) > 0!", 1258 device_xname(&sc->sc_dev), txprio, txq->txq_nactive); 1259 ifp->if_timer = 0; 1260 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow)); 1261 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow)); 1262 GE_FUNC_EXIT(sc, ""); 1263 return intrmask; 1264 } 1265 1266 int 1267 gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio) 1268 { 1269 struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1270 int error; 1271 1272 GE_FUNC_ENTER(sc, "gfe_tx_txqalloc"); 1273 1274 error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1, 1275 GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE); 1276 if (error) { 1277 GE_FUNC_EXIT(sc, ""); 1278 return error; 1279 } 1280 error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0); 1281 if (error) { 1282 gfe_dmamem_free(sc, &txq->txq_desc_mem); 1283 GE_FUNC_EXIT(sc, ""); 1284 return error; 1285 } 1286 GE_FUNC_EXIT(sc, ""); 1287 return 0; 1288 } 1289 1290 int 1291 gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio) 1292 { 1293 struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1294 volatile struct gt_eth_desc *txd; 1295 unsigned int i; 1296 bus_addr_t addr; 1297 1298 GE_FUNC_ENTER(sc, "gfe_tx_start"); 1299 1300 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| 1301 ETH_IR_TxEndLow |ETH_IR_TxBufferLow); 1302 1303 if (sc->sc_flags & GE_NOFREE) { 1304 KASSERT(txq->txq_desc_mem.gdm_kva != NULL); 1305 KASSERT(txq->txq_buf_mem.gdm_kva != NULL); 1306 } else { 1307 int error = gfe_tx_txqalloc(sc, txprio); 1308 if (error) { 1309 GE_FUNC_EXIT(sc, "!"); 1310 return error; 1311 } 1312 } 1313 1314 txq->txq_descs = 1315 (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva; 1316 txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr; 1317 txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr; 1318 1319 txq->txq_pendq.ifq_maxlen = 10; 1320 txq->txq_ei_gapcount = 0; 1321 txq->txq_nactive = 0; 1322 txq->txq_fi = 0; 1323 txq->txq_lo = 0; 1324 txq->txq_inptr = GE_TXBUF_SIZE; 1325 txq->txq_outptr = 0; 1326 for (i = 0, txd = txq->txq_descs, 1327 addr = txq->txq_desc_busaddr + sizeof(*txd); 1328 i < GE_TXDESC_MAX - 1; 1329 i++, txd++, addr += sizeof(*txd)) { 1330 /* 1331 * update the nxtptr to point to the next txd. 1332 */ 1333 txd->ed_cmdsts = 0; 1334 txd->ed_nxtptr = htogt32(addr); 1335 } 1336 txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr = 1337 htogt32(txq->txq_desc_busaddr); 1338 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0, 1339 GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1340 1341 switch (txprio) { 1342 case GE_TXPRIO_HI: 1343 txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh; 1344 txq->txq_esdcmrbits = ETH_ESDCMR_TXDH; 1345 txq->txq_epsrbits = ETH_EPSR_TxHigh; 1346 txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno); 1347 GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr); 1348 break; 1349 1350 case GE_TXPRIO_LO: 1351 txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow; 1352 txq->txq_esdcmrbits = ETH_ESDCMR_TXDL; 1353 txq->txq_epsrbits = ETH_EPSR_TxLow; 1354 txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno); 1355 GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr); 1356 break; 1357 1358 case GE_TXPRIO_NONE: 1359 break; 1360 } 1361 #if 0 1362 GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp)); 1363 gt_write(device_parent(&sc->sc_dev), txq->txq_ectdp, 1364 txq->txq_desc_busaddr); 1365 GE_DPRINTF(sc, (")")); 1366 #endif 1367 1368 /* 1369 * If we are restarting, there may be packets in the pending queue 1370 * waiting to be enqueued. Try enqueuing packets from both priority 1371 * queues until the pending queue is empty or there no room for them 1372 * on the device. 1373 */ 1374 while (gfe_tx_enqueue(sc, txprio)) 1375 continue; 1376 1377 GE_FUNC_EXIT(sc, ""); 1378 return 0; 1379 } 1380 1381 void 1382 gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush) 1383 { 1384 struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1385 1386 GE_FUNC_ENTER(sc, "gfe_tx_cleanup"); 1387 if (txq == NULL) { 1388 GE_FUNC_EXIT(sc, ""); 1389 return; 1390 } 1391 1392 if (!flush) { 1393 GE_FUNC_EXIT(sc, ""); 1394 return; 1395 } 1396 1397 if ((sc->sc_flags & GE_NOFREE) == 0) { 1398 gfe_dmamem_free(sc, &txq->txq_desc_mem); 1399 gfe_dmamem_free(sc, &txq->txq_buf_mem); 1400 } 1401 GE_FUNC_EXIT(sc, "-F"); 1402 } 1403 1404 void 1405 gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 1406 { 1407 GE_FUNC_ENTER(sc, "gfe_tx_stop"); 1408 1409 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL); 1410 1411 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask); 1412 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask); 1413 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| 1414 ETH_IR_TxEndLow |ETH_IR_TxBufferLow); 1415 1416 gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP); 1417 gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP); 1418 1419 sc->sc_ec.ec_if.if_timer = 0; 1420 GE_FUNC_EXIT(sc, ""); 1421 } 1422 1423 int 1424 gfe_intr(void *arg) 1425 { 1426 struct gfe_softc * const sc = arg; 1427 uint32_t cause; 1428 uint32_t intrmask = sc->sc_intrmask; 1429 int claim = 0; 1430 int cnt; 1431 1432 GE_FUNC_ENTER(sc, "gfe_intr"); 1433 1434 for (cnt = 0; cnt < 4; cnt++) { 1435 if (sc->sc_intrmask != intrmask) { 1436 sc->sc_intrmask = intrmask; 1437 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1438 } 1439 cause = GE_READ(sc, EICR); 1440 cause &= sc->sc_intrmask; 1441 GE_DPRINTF(sc, (".%#x", cause)); 1442 if (cause == 0) 1443 break; 1444 1445 claim = 1; 1446 1447 GE_WRITE(sc, EICR, ~cause); 1448 #ifndef GE_NORX 1449 if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError)) 1450 intrmask = gfe_rx_process(sc, cause, intrmask); 1451 #endif 1452 1453 #ifndef GE_NOTX 1454 if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh)) 1455 intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask); 1456 if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow)) 1457 intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask); 1458 #endif 1459 if (cause & ETH_IR_MIIPhySTC) { 1460 sc->sc_flags |= GE_PHYSTSCHG; 1461 /* intrmask &= ~ETH_IR_MIIPhySTC; */ 1462 } 1463 } 1464 1465 while (gfe_tx_enqueue(sc, GE_TXPRIO_HI)) 1466 continue; 1467 while (gfe_tx_enqueue(sc, GE_TXPRIO_LO)) 1468 continue; 1469 1470 GE_FUNC_EXIT(sc, ""); 1471 return claim; 1472 } 1473 1474 int 1475 gfe_mii_read (device_t self, int phy, int reg) 1476 { 1477 return gt_mii_read(self, device_parent(self), phy, reg); 1478 } 1479 1480 void 1481 gfe_mii_write (device_t self, int phy, int reg, int value) 1482 { 1483 gt_mii_write(self, device_parent(self), phy, reg, value); 1484 } 1485 1486 void 1487 gfe_mii_statchg (device_t self) 1488 { 1489 /* struct gfe_softc *sc = device_private(self); */ 1490 /* do nothing? */ 1491 } 1492 1493 int 1494 gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op) 1495 { 1496 int error = 0; 1497 GE_FUNC_ENTER(sc, "gfe_whack"); 1498 1499 switch (op) { 1500 case GE_WHACK_RESTART: 1501 #ifndef GE_NOTX 1502 gfe_tx_stop(sc, op); 1503 #endif 1504 /* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */ 1505 /* FALLTHROUGH */ 1506 case GE_WHACK_START: 1507 #ifndef GE_NOHASH 1508 if (error == 0 && sc->sc_hashtable == NULL) { 1509 error = gfe_hash_alloc(sc); 1510 if (error) 1511 break; 1512 } 1513 if (op != GE_WHACK_RESTART) 1514 gfe_hash_fill(sc); 1515 #endif 1516 #ifndef GE_NORX 1517 if (op != GE_WHACK_RESTART) { 1518 error = gfe_rx_prime(sc); 1519 if (error) 1520 break; 1521 } 1522 #endif 1523 #ifndef GE_NOTX 1524 error = gfe_tx_start(sc, GE_TXPRIO_HI); 1525 if (error) 1526 break; 1527 #endif 1528 sc->sc_ec.ec_if.if_flags |= IFF_RUNNING; 1529 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN); 1530 GE_WRITE(sc, EPCXR, sc->sc_pcxr); 1531 GE_WRITE(sc, EICR, 0); 1532 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1533 #ifndef GE_NOHASH 1534 GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr); 1535 #endif 1536 #ifndef GE_NORX 1537 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD); 1538 sc->sc_flags |= GE_RXACTIVE; 1539 #endif 1540 /* FALLTHROUGH */ 1541 case GE_WHACK_CHANGE: 1542 GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)", 1543 GE_READ(sc, EPCR), GE_READ(sc, EIMR))); 1544 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN); 1545 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1546 gfe_ifstart(&sc->sc_ec.ec_if); 1547 GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)", 1548 GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1))); 1549 GE_FUNC_EXIT(sc, ""); 1550 return error; 1551 case GE_WHACK_STOP: 1552 break; 1553 } 1554 1555 #ifdef GE_DEBUG 1556 if (error) 1557 GE_DPRINTF(sc, (" failed: %d\n", error)); 1558 #endif 1559 GE_WRITE(sc, EPCR, sc->sc_pcr); 1560 GE_WRITE(sc, EIMR, 0); 1561 sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; 1562 #ifndef GE_NOTX 1563 gfe_tx_stop(sc, GE_WHACK_STOP); 1564 #endif 1565 #ifndef GE_NORX 1566 gfe_rx_stop(sc, GE_WHACK_STOP); 1567 #endif 1568 #ifndef GE_NOHASH 1569 if ((sc->sc_flags & GE_NOFREE) == 0) { 1570 gfe_dmamem_free(sc, &sc->sc_hash_mem); 1571 sc->sc_hashtable = NULL; 1572 } 1573 #endif 1574 1575 GE_FUNC_EXIT(sc, ""); 1576 return error; 1577 } 1578 1579 int 1580 gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN]) 1581 { 1582 uint32_t w0, add0, add1; 1583 uint32_t result; 1584 1585 GE_FUNC_ENTER(sc, "gfe_hash_compute"); 1586 add0 = ((uint32_t) eaddr[5] << 0) | 1587 ((uint32_t) eaddr[4] << 8) | 1588 ((uint32_t) eaddr[3] << 16); 1589 1590 add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4); 1591 add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2); 1592 add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1); 1593 1594 add1 = ((uint32_t) eaddr[2] << 0) | 1595 ((uint32_t) eaddr[1] << 8) | 1596 ((uint32_t) eaddr[0] << 16); 1597 1598 add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4); 1599 add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2); 1600 add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1); 1601 1602 GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr))); 1603 /* 1604 * hashResult is the 15 bits Hash entry address. 1605 * ethernetADD is a 48 bit number, which is derived from the Ethernet 1606 * MAC address, by nibble swapping in every byte (i.e MAC address 1607 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb). 1608 */ 1609 1610 if ((sc->sc_pcr & ETH_EPCR_HM) == 0) { 1611 /* 1612 * hashResult[14:0] = hashFunc0(ethernetADD[47:0]) 1613 * 1614 * hashFunc0 calculates the hashResult in the following manner: 1615 * hashResult[ 8:0] = ethernetADD[14:8,1,0] 1616 * XOR ethernetADD[23:15] XOR ethernetADD[32:24] 1617 */ 1618 result = (add0 & 3) | ((add0 >> 6) & ~3); 1619 result ^= (add0 >> 15) ^ (add1 >> 0); 1620 result &= 0x1ff; 1621 /* 1622 * hashResult[14:9] = ethernetADD[7:2] 1623 */ 1624 result |= (add0 & ~3) << 7; /* excess bits will be masked */ 1625 GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff)); 1626 } else { 1627 #define TRIBITFLIP 073516240 /* yes its in octal */ 1628 /* 1629 * hashResult[14:0] = hashFunc1(ethernetADD[47:0]) 1630 * 1631 * hashFunc1 calculates the hashResult in the following manner: 1632 * hashResult[08:00] = ethernetADD[06:14] 1633 * XOR ethernetADD[15:23] XOR ethernetADD[24:32] 1634 */ 1635 w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff; 1636 /* 1637 * Now bitswap those 9 bits 1638 */ 1639 result = 0; 1640 result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6; 1641 result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3; 1642 result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0; 1643 1644 /* 1645 * hashResult[14:09] = ethernetADD[00:05] 1646 */ 1647 result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12; 1648 result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9; 1649 GE_DPRINTF(sc, ("1(%#x)", result)); 1650 } 1651 GE_FUNC_EXIT(sc, ""); 1652 return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff); 1653 } 1654 1655 int 1656 gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op, 1657 enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN]) 1658 { 1659 uint64_t he; 1660 uint64_t *maybe_he_p = NULL; 1661 int limit; 1662 int hash; 1663 int maybe_hash = 0; 1664 1665 GE_FUNC_ENTER(sc, "gfe_hash_entry_op"); 1666 1667 hash = gfe_hash_compute(sc, eaddr); 1668 1669 if (sc->sc_hashtable == NULL) { 1670 panic("%s:%d: hashtable == NULL!", device_xname(&sc->sc_dev), 1671 __LINE__); 1672 } 1673 1674 /* 1675 * Assume we are going to insert so create the hash entry we 1676 * are going to insert. We also use it to match entries we 1677 * will be removing. 1678 */ 1679 he = ((uint64_t) eaddr[5] << 43) | 1680 ((uint64_t) eaddr[4] << 35) | 1681 ((uint64_t) eaddr[3] << 27) | 1682 ((uint64_t) eaddr[2] << 19) | 1683 ((uint64_t) eaddr[1] << 11) | 1684 ((uint64_t) eaddr[0] << 3) | 1685 HSH_PRIO_INS(prio) | HSH_V | HSH_R; 1686 1687 /* 1688 * The GT will search upto 12 entries for a hit, so we must mimic that. 1689 */ 1690 hash &= sc->sc_hashmask / sizeof(he); 1691 for (limit = HSH_LIMIT; limit > 0 ; --limit) { 1692 /* 1693 * Does the GT wrap at the end, stop at the, or overrun the 1694 * end? Assume it wraps for now. Stash a copy of the 1695 * current hash entry. 1696 */ 1697 uint64_t *he_p = &sc->sc_hashtable[hash]; 1698 uint64_t thishe = *he_p; 1699 1700 /* 1701 * If the hash entry isn't valid, that break the chain. And 1702 * this entry a good candidate for reuse. 1703 */ 1704 if ((thishe & HSH_V) == 0) { 1705 maybe_he_p = he_p; 1706 break; 1707 } 1708 1709 /* 1710 * If the hash entry has the same address we are looking for 1711 * then ... if we are removing and the skip bit is set, its 1712 * already been removed. if are adding and the skip bit is 1713 * clear, then its already added. In either return EBUSY 1714 * indicating the op has already been done. Otherwise flip 1715 * the skip bit and return 0. 1716 */ 1717 if (((he ^ thishe) & HSH_ADDR_MASK) == 0) { 1718 if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) || 1719 ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0)) 1720 return EBUSY; 1721 *he_p = thishe ^ HSH_S; 1722 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1723 hash * sizeof(he), sizeof(he), 1724 BUS_DMASYNC_PREWRITE); 1725 GE_FUNC_EXIT(sc, "^"); 1726 return 0; 1727 } 1728 1729 /* 1730 * If we haven't found a slot for the entry and this entry 1731 * is currently being skipped, return this entry. 1732 */ 1733 if (maybe_he_p == NULL && (thishe & HSH_S)) { 1734 maybe_he_p = he_p; 1735 maybe_hash = hash; 1736 } 1737 1738 hash = (hash + 1) & (sc->sc_hashmask / sizeof(he)); 1739 } 1740 1741 /* 1742 * If we got here, then there was no entry to remove. 1743 */ 1744 if (op == GE_HASH_REMOVE) { 1745 GE_FUNC_EXIT(sc, "?"); 1746 return ENOENT; 1747 } 1748 1749 /* 1750 * If we couldn't find a slot, return an error. 1751 */ 1752 if (maybe_he_p == NULL) { 1753 GE_FUNC_EXIT(sc, "!"); 1754 return ENOSPC; 1755 } 1756 1757 /* Update the entry. 1758 */ 1759 *maybe_he_p = he; 1760 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1761 maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE); 1762 GE_FUNC_EXIT(sc, "+"); 1763 return 0; 1764 } 1765 1766 int 1767 gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd) 1768 { 1769 struct gfe_softc * const sc = ec->ec_if.if_softc; 1770 int error; 1771 enum gfe_hash_op op; 1772 enum gfe_rxprio prio; 1773 1774 GE_FUNC_ENTER(sc, "hash_multichg"); 1775 /* 1776 * Is this a wildcard entry? If so and its being removed, recompute. 1777 */ 1778 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1779 if (cmd == SIOCDELMULTI) { 1780 GE_FUNC_EXIT(sc, ""); 1781 return ENETRESET; 1782 } 1783 1784 /* 1785 * Switch in 1786 */ 1787 sc->sc_flags |= GE_ALLMULTI; 1788 if ((sc->sc_pcr & ETH_EPCR_PM) == 0) { 1789 sc->sc_pcr |= ETH_EPCR_PM; 1790 GE_WRITE(sc, EPCR, sc->sc_pcr); 1791 GE_FUNC_EXIT(sc, ""); 1792 return 0; 1793 } 1794 GE_FUNC_EXIT(sc, ""); 1795 return ENETRESET; 1796 } 1797 1798 prio = GE_RXPRIO_MEDLO; 1799 op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD); 1800 1801 if (sc->sc_hashtable == NULL) { 1802 GE_FUNC_EXIT(sc, ""); 1803 return 0; 1804 } 1805 1806 error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo); 1807 if (error == EBUSY) { 1808 printf("%s: multichg: tried to %s %s again\n", 1809 device_xname(&sc->sc_dev), 1810 cmd == SIOCDELMULTI ? "remove" : "add", 1811 ether_sprintf(enm->enm_addrlo)); 1812 GE_FUNC_EXIT(sc, ""); 1813 return 0; 1814 } 1815 1816 if (error == ENOENT) { 1817 printf("%s: multichg: failed to remove %s: not in table\n", 1818 device_xname(&sc->sc_dev), 1819 ether_sprintf(enm->enm_addrlo)); 1820 GE_FUNC_EXIT(sc, ""); 1821 return 0; 1822 } 1823 1824 if (error == ENOSPC) { 1825 printf("%s: multichg: failed to add %s: no space; regenerating table\n", 1826 device_xname(&sc->sc_dev), 1827 ether_sprintf(enm->enm_addrlo)); 1828 GE_FUNC_EXIT(sc, ""); 1829 return ENETRESET; 1830 } 1831 GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n", 1832 device_xname(&sc->sc_dev), 1833 cmd == SIOCDELMULTI ? "remove" : "add", 1834 ether_sprintf(enm->enm_addrlo))); 1835 GE_FUNC_EXIT(sc, ""); 1836 return 0; 1837 } 1838 1839 int 1840 gfe_hash_fill(struct gfe_softc *sc) 1841 { 1842 struct ether_multistep step; 1843 struct ether_multi *enm; 1844 int error; 1845 1846 GE_FUNC_ENTER(sc, "gfe_hash_fill"); 1847 1848 error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, 1849 CLLADDR(sc->sc_ec.ec_if.if_sadl)); 1850 if (error) 1851 GE_FUNC_EXIT(sc, "!"); 1852 return error; 1853 1854 sc->sc_flags &= ~GE_ALLMULTI; 1855 if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) 1856 sc->sc_pcr &= ~ETH_EPCR_PM; 1857 ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); 1858 while (enm != NULL) { 1859 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1860 sc->sc_flags |= GE_ALLMULTI; 1861 sc->sc_pcr |= ETH_EPCR_PM; 1862 } else { 1863 error = gfe_hash_entry_op(sc, GE_HASH_ADD, 1864 GE_RXPRIO_MEDLO, enm->enm_addrlo); 1865 if (error == ENOSPC) 1866 break; 1867 } 1868 ETHER_NEXT_MULTI(step, enm); 1869 } 1870 1871 GE_FUNC_EXIT(sc, ""); 1872 return error; 1873 } 1874 1875 int 1876 gfe_hash_alloc(struct gfe_softc *sc) 1877 { 1878 int error; 1879 GE_FUNC_ENTER(sc, "gfe_hash_alloc"); 1880 sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; 1881 error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, 1882 BUS_DMA_NOCACHE); 1883 if (error) { 1884 printf("%s: failed to allocate %d bytes for hash table: %d\n", 1885 device_xname(&sc->sc_dev), sc->sc_hashmask + 1, error); 1886 GE_FUNC_EXIT(sc, ""); 1887 return error; 1888 } 1889 sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; 1890 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); 1891 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1892 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); 1893 GE_FUNC_EXIT(sc, ""); 1894 return 0; 1895 } 1896