/*	$NetBSD: if_gfe.c,v 1.13 2003/08/05 14:55:06 scw Exp $	*/

/*
 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Allegro Networks, Inc., and Wasabi Systems, Inc.
 * 4. The name of Allegro Networks, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * if_gfe.c -- GT ethernet MAC driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.13 2003/08/05 14:55:06 scw Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <sys/callout.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/marvell/gtintrreg.h>
#include <dev/marvell/gtethreg.h>

#include <dev/marvell/gtvar.h>
#include <dev/marvell/if_gfevar.h>

#define	GE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
#define	GE_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))

#define	GE_DEBUG
#if 0
#define	GE_NOHASH
#define	GE_NORX
#endif

#ifdef GE_DEBUG
#define	GE_DPRINTF(sc, a)	do \
	  if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
	    printf a; \
	while (0)
#define	GE_FUNC_ENTER(sc, func)	GE_DPRINTF(sc, ("[" func))
#define	GE_FUNC_EXIT(sc, str)	GE_DPRINTF(sc, (str "]"))
#else
#define	GE_DPRINTF(sc, a)	do { } while (0)
#define	GE_FUNC_ENTER(sc, func)	do { } while (0)
#define	GE_FUNC_EXIT(sc, str)	do { } while (0)
#endif

enum gfe_whack_op {
	GE_WHACK_START,		GE_WHACK_RESTART,
	GE_WHACK_CHANGE,	GE_WHACK_STOP
};

enum gfe_hash_op {
	GE_HASH_ADD,		GE_HASH_REMOVE,
};

#if 1
#define	htogt32(a)		htobe32(a)
#define	gt32toh(a)		be32toh(a)
#else
#define	htogt32(a)		htole32(a)
#define	gt32toh(a)		le32toh(a)
#endif

#define	GE_RXDSYNC(sc, rxq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
	    (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
	    (ops))
#define	GE_RXDPRESYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_RXDPOSTSYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define	GE_TXDSYNC(sc, txq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
	    (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
	    (ops))
#define	GE_TXDPRESYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_TXDPOSTSYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
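
/*
 * STATIC is defined to nothing, presumably so that these normally
 * static functions keep external linkage and remain visible to a
 * debugger.
 */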
#define	STATIC

STATIC int gfe_match (struct device *, struct cfdata *, void *);
STATIC void gfe_attach (struct device *, struct device *, void *);

STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int,
	size_t, int);
STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *);

STATIC int gfe_ifioctl (struct ifnet *, u_long, caddr_t);
STATIC void gfe_ifstart (struct ifnet *);
STATIC void gfe_ifwatchdog (struct ifnet *);

STATIC int gfe_mii_mediachange (struct ifnet *);
STATIC void gfe_mii_mediastatus (struct ifnet *, struct ifmediareq *);
STATIC int gfe_mii_read (struct device *, int, int);
STATIC void gfe_mii_write (struct device *, int, int, int);
STATIC void gfe_mii_statchg (struct device *);

STATIC void gfe_tick(void *arg);

STATIC void gfe_tx_restart(void *);
STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio);
STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t);
STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int);
STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio);
STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio);
STATIC int gfe_rx_prime(struct gfe_softc *);
STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t);
STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio);
STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_intr(void *);

STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op);

STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op,
	enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]);
STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *,
	u_long);
STATIC int gfe_hash_fill(struct gfe_softc *);
STATIC int gfe_hash_alloc(struct gfe_softc *);

/* Linkup to the rest of the kernel */
CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
    gfe_match, gfe_attach, NULL, NULL);

extern struct cfdriver gfe_cd;

int
gfe_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct gt_softc *gt = (struct gt_softc *) parent;
	struct gt_attach_args *ga = aux;
	uint8_t enaddr[6];

	if (!GT_ETHEROK(gt, ga, &gfe_cd))
		return 0;

	if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0)
		return 0;

	if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 &&
	    enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0)
		return 0;

	return 1;
}

/*
 * Attach this instance, and then all the sub-devices
 */
void
gfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct gt_attach_args * const ga = aux;
	struct gt_softc * const gt = (struct gt_softc *) parent;
	struct gfe_softc * const sc = (struct gfe_softc *) self;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t data;
	uint8_t enaddr[6];
	int phyaddr;
	uint32_t sdcr;

	GT_ETHERFOUND(gt, ga);

	sc->sc_gt_memt = ga->ga_memt;
	sc->sc_gt_memh = ga->ga_memh;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_macno = ga->ga_unit;

	if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
	    ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
		aprint_error(": failed to map registers\n");
	}

	callout_init(&sc->sc_co);

	data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
	phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);

	gtget_macaddr(gt, sc->sc_macno, enaddr);

	sc->sc_pcr = GE_READ(sc, EPCR);
	sc->sc_pcxr = GE_READ(sc, EPCXR);
	sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal(": address %s", ether_sprintf(enaddr));

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
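
	/* Bit 0 of the config file flags selects RMII instead of MII. */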
	if (sc->sc_dev.dv_cfdata->cf_flags & 1) {
		aprint_normal(", phy %d (rmii)", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else {
		aprint_normal(", phy %d (mii)", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
	sc->sc_pcxr &= ~(3 << 14);
	sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them to quiesce.
		 */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
		do {
			delay(100);
		} while (tries-- > 0 &&
		    (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
	}

	sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too ***ing bad.
	 */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	sdcr = GE_READ(sc, ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ESDCR, sdcr);
	sc->sc_max_frame_length = 1536;

	aprint_normal("\n");

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfe_mii_read;
	sc->sc_mii.mii_writereg = gfe_mii_write;
	sc->sc_mii.mii_statchg = gfe_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gfe_mii_mediachange,
	    gfe_mii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	/* ifp->if_mowner = &sc->sc_mowner; */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, self->dv_xname, RND_TYPE_NET, 0);
#endif
	intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
	    gfe_intr, sc);
}
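
/*
 * Allocate, map, and load a contiguous chunk of DMA-safe memory,
 * recording the pieces in the gfe_dmamem bookkeeping structure so
 * that gfe_dmamem_free() can later undo each step.
 */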
int
gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs,
	size_t size, int flags)
{
	int error = 0;

	GE_FUNC_ENTER(sc, "gfe_dmamem_alloc");

	gdm->gdm_size = size;
	gdm->gdm_maxsegs = maxsegs;

	error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE,
	    gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
	    gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
	    gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
	    gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	/* invalidate from cache */
	bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size,
	    BUS_DMASYNC_PREREAD);
 fail:
	if (error) {
		gfe_dmamem_free(sc, gdm);
		GE_DPRINTF(sc, (":err=%d", error));
	}
	GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
	    gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs,
	    gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len));
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
{
	GE_FUNC_ENTER(sc, "gfe_dmamem_free");
	if (gdm->gdm_map)
		bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
	if (gdm->gdm_kva)
		bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
	if (gdm->gdm_nsegs > 0)
		bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
	gdm->gdm_map = NULL;
	gdm->gdm_kva = NULL;
	gdm->gdm_nsegs = 0;
	GE_FUNC_EXIT(sc, "");
}

int
gfe_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	GE_FUNC_ENTER(sc, "gfe_ifioctl");
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			error = gfe_whack(sc, GE_WHACK_START);
			if (error == 0)
				arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_UP|IFF_RUNNING:/* active->active, update */
			error = gfe_whack(sc, GE_WHACK_CHANGE);
			break;
		case IFF_RUNNING:	/* not up, so we stop */
			error = gfe_whack(sc, GE_WHACK_STOP);
			break;
		case IFF_UP:		/* not running, so we start */
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		case 0:			/* idle->idle: do nothing */
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI)
		    ? ether_addmulti(ifr, &sc->sc_ec)
		    : ether_delmulti(ifr, &sc->sc_ec);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				error = gfe_whack(sc, GE_WHACK_CHANGE);
			else
				error = 0;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	GE_FUNC_EXIT(sc, "");
	return error;
}
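
/*
 * Drain the interface send queue into the high priority transmit
 * queue's pending queue; gfe_tx_enqueue() then copies each packet
 * into the device's descriptor ring.
 */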
void
gfe_ifstart(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_ifstart");

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		GE_FUNC_EXIT(sc, "$");
		return;
	}

	if (sc->sc_txq[GE_TXPRIO_HI] == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
#if defined(DEBUG) || defined(DIAGNOSTIC)
		printf("%s: ifstart: txq not yet created\n", ifp->if_xname);
#endif
		GE_FUNC_EXIT(sc, "");
		return;
	}

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_flags &= ~IFF_OACTIVE;
			GE_FUNC_EXIT(sc, "");
			return;
		}

		/*
		 * No space in the pending queue?  Try again later.
		 */
		if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI]->txq_pendq))
			break;

		/*
		 * Try to enqueue an mbuf to the device.  If that fails, we
		 * can always try to map the next mbuf.
		 */
		IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI]->txq_pendq, m);
		GE_DPRINTF(sc, (">"));
#ifndef GE_NOTX
		(void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
#endif
	}

	/*
	 * The attempt to queue the mbuf failed; put it back on the
	 * interface's send queue and mark the interface as active.
	 */
	IF_PREPEND(&ifp->if_snd, m);
	ifp->if_flags |= IFF_OACTIVE;
	GE_FUNC_EXIT(sc, "%%");
}

void
gfe_ifwatchdog(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct gfe_txqueue *txq;

	GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
	printf("%s: device timeout", sc->sc_dev.dv_xname);
	if ((txq = sc->sc_txq[GE_TXPRIO_HI]) != NULL) {
		uint32_t curtxdnum;

		curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh,
		    txq->txq_ectdp) - txq->txq_desc_busaddr) /
		    sizeof(txq->txq_descs[0]);
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		GE_TXDPOSTSYNC(sc, txq, curtxdnum);
		printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
		    txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
		    txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
		    GE_READ(sc, EICR));
		GE_TXDPRESYNC(sc, txq, txq->txq_fi);
		GE_TXDPRESYNC(sc, txq, curtxdnum);
	}
	printf("\n");
	ifp->if_oerrors++;
	(void) gfe_whack(sc, GE_WHACK_RESTART);
	GE_FUNC_EXIT(sc, "");
}
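
/*
 * Allocate and initialize the receive queue (descriptor ring and
 * receive buffers) for the given priority.  The ring is circular:
 * the last descriptor's next pointer refers back to the first.
 */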
int
gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq;
	volatile struct gt_eth_desc *rxd;
	const bus_dma_segment_t *ds;
	int error;
	int idx;
	bus_addr_t nxtaddr;
	bus_size_t boff;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if (sc->sc_rxq[rxprio] != NULL) {
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	rxq = (struct gfe_rxqueue *) malloc(sizeof(*rxq), M_DEVBUF, M_NOWAIT);
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "!");
		return ENOMEM;
	}

	memset(rxq, 0, sizeof(*rxq));

	error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		free(rxq, M_DEVBUF);
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}
	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
	if (error) {
		gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
		free(rxq, M_DEVBUF);
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	sc->sc_rxq[rxprio] = rxq;
	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
	for (idx = 0, rxd = rxq->rxq_descs,
	    boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
	    nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	    idx < GE_RXDESC_MAX;
	    idx++, rxd++, nxtaddr += sizeof(*rxd)) {
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
		/*
		 * Update nxtptr to point to the next rxd; the last
		 * descriptor wraps back to the start of the ring.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;
		rxd->ed_nxtptr = htogt32(nxtaddr);
		boff += GE_RXBUF_SIZE;
		if (boff == ds->ds_len) {
			ds++;
			boff = 0;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}
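
/*
 * Harvest received packets from the given priority's descriptor ring,
 * copying each one into an mbuf and passing it up the stack.
 */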
void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = sc->sc_rxq[rxprio];
	struct mbuf *m = rxq->rxq_curpkt;

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not a single buffer packet with no errors
		 * or for some reason it's bigger than our frame size,
		 * ignore it and go to the next packet.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;
			goto give_it_back;
		}

		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		/* Offset the payload by two bytes to align the IP header. */
		m->m_data += 2;
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rb_data, buflen);
		m->m_len = buflen;
		m->m_pkthdr.len = buflen;
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
			ETHER_ADDR_LEN) == 0) {
			(*ifp->if_input)(ifp, m);
			m = NULL;
			GE_DPRINTF(sc, (">"));
		} else {
			m->m_len = 0;
			m->m_pkthdr.len = 0;
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

 give_it_back:
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
	rxq->rxq_curpkt = m;
	GE_FUNC_EXIT(sc, "");
}
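
/*
 * Service receive interrupts: harvest completed packets from each
 * priority whose RxBuffer bit is set, and handle RxError (ring full)
 * conditions by idling the affected queue until gfe_tick() restarts it.
 */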
uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
	/*
	 * RXPRIO_DECODER is a 2-bit-per-entry lookup table: for any
	 * 4-bit rxbits value, (RXPRIO_DECODER >> (rxbits * 2)) & 3 is
	 * the index of the most significant set bit, i.e. the highest
	 * priority queue that needs service.
	 */
#define	RXPRIO_DECODER	0xffffaa50

	GE_FUNC_ENTER(sc, "gfe_rx_process");

	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;

		GE_DPRINTF(sc, ("%1x", rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;

		rxbits &= ~(1 << rxprio);
		rxq = sc->sc_rxq[rxprio];
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi));
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}

int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

 bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}

void
gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq = sc->sc_rxq[rxprio];

	GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (rxq->rxq_curpkt)
		m_freem(rxq->rxq_curpkt);
	gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
	gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
	free(rxq, M_DEVBUF);
	sc->sc_rxq[rxprio] = NULL;
	GE_FUNC_EXIT(sc, "");
}

void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
	do {
		delay(10);
	} while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}
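
/*
 * Callout handler: restart any receive queues that gfe_rx_process()
 * idled after a ring-full condition, retry a deferred transmit start,
 * and then run the interrupt handler to pick up anything that
 * happened in the meantime.
 */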
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		intrmask |= sc->sc_idlemask;
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}
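
/*
 * Copy the packet at the head of the pending queue into the transmit
 * buffer and hand a descriptor for it to the GT.  The transmit buffer
 * is used as a byte ring: txq_outptr chases txq_inptr, and each packet
 * is padded out to a cache line boundary.  Returns nonzero if a packet
 * was enqueued.
 */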
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue?  If not, punt.  Likewise
	 * if the txq is not yet created.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?
	 * Do we have enough free descriptors?
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		volatile struct gt_eth_desc * const txd2 =
		    &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		cmdsts = gt32toh(txd2->ed_cmdsts);
		if (cmdsts & TX_CMD_O) {
			int nextin;
			/*
			 * Sometimes the Discovery forgets to update the
			 * last descriptor.  See if we own the descriptor
			 * after it (since we know we've turned that one over
			 * to the Discovery, and if we owned it, the Discovery
			 * gave it back).  If we do, we know the Discovery
			 * gave back this one but forgot to mark it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "@");
				return 0;
			}
#ifdef DEBUG
			printf("%s: txenqueue: transmitter resynced at %d\n",
			    sc->sc_dev.dv_xname, txq->txq_fi);
#endif
		}
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
		txq->txq_inptr += roundup(pktlen, dcache_line_size);
		txq->txq_nactive--;

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;
		GE_DPRINTF(sc, ("%%"));
	}

	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}

	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken, so a CPU-owned
	 * descriptor must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	m_copydata(m, 0, m->m_pkthdr.len,
	    txq->txq_buf_mem.gdm_kva + txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * We're done with this mbuf: dequeue it from the pending queue,
	 * give bpf a look at it, and free it.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
#endif
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if it needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}
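
/*
 * Reclaim descriptors the GT has finished transmitting, updating
 * statistics and the transmit buffer's input pointer as we go.
 * Returns the (possibly reduced) interrupt mask.
 */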
uint32_t
gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
{
	struct gfe_txqueue * const txq = sc->sc_txq[txprio];
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	GE_FUNC_ENTER(sc, "gfe_tx_done");

	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return intrmask;
	}

	while (txq->txq_nactive > 0) {
		const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
		volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
		uint32_t cmdsts;
		size_t pktlen;

		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
			int nextin;

			if (txq->txq_nactive == 1) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
			/*
			 * Sometimes the Discovery forgets to update the
			 * ownership bit in the descriptor.  See if we own the
			 * descriptor after it (since we know we've turned
			 * that one over to the Discovery, and if we own it now
			 * then the Discovery gave it back).  If we do, we know
			 * the Discovery gave back this one but forgot to mark
			 * it as ours.
			 */
			nextin = txq->txq_fi + 1;
			if (nextin == GE_TXDESC_MAX)
				nextin = 0;
			GE_TXDPOSTSYNC(sc, txq, nextin);
			if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) &
			    TX_CMD_O) {
				GE_TXDPRESYNC(sc, txq, txq->txq_fi);
				GE_TXDPRESYNC(sc, txq, nextin);
				GE_FUNC_EXIT(sc, "");
				return intrmask;
			}
#ifdef DEBUG
			printf("%s: txdone: transmitter resynced at %d\n",
			    sc->sc_dev.dv_xname, txq->txq_fi);
#endif
		}
#if 0
		GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
		    txq->txq_lo,
		    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
		    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
		GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
		if (++txq->txq_fi == GE_TXDESC_MAX)
			txq->txq_fi = 0;
		txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
		pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
		bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
		    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
		txq->txq_inptr += roundup(pktlen, dcache_line_size);

		/* statistics */
		ifp->if_opackets++;
		if (cmdsts & TX_STS_ES)
			ifp->if_oerrors++;

		/* txd->ed_bufptr = 0; */

		ifp->if_timer = 5;
		--txq->txq_nactive;
	}
	if (txq->txq_nactive != 0)
		panic("%s: transmit fifo%d empty but active count (%d) > 0!",
		    sc->sc_dev.dv_xname, txprio, txq->txq_nactive);
	ifp->if_timer = 0;
	intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
	intrmask &= ~(txq->txq_intrbits &
	    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
	GE_FUNC_EXIT(sc, "");
	return intrmask;
}
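
/*
 * Create (if necessary) and initialize the transmit queue for the
 * given priority, then point the GT's current transmit descriptor
 * pointer at the start of the ring.
 */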
int
gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct gfe_txqueue *txq;
	volatile struct gt_eth_desc *txd;
	unsigned int i;
	bus_addr_t addr;

	GE_FUNC_ENTER(sc, "gfe_tx_start");

	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow|ETH_IR_TxBufferLow);

	if ((txq = sc->sc_txq[txprio]) == NULL) {
		int error;

		txq = (struct gfe_txqueue *) malloc(sizeof(*txq),
		    M_DEVBUF, M_NOWAIT);
		if (txq == NULL) {
			GE_FUNC_EXIT(sc, "");
			return ENOMEM;
		}
		memset(txq, 0, sizeof(*txq));
		error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
		    GE_TXMEM_SIZE, BUS_DMA_NOCACHE);
		if (error) {
			free(txq, M_DEVBUF);
			GE_FUNC_EXIT(sc, "");
			return error;
		}
		error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1,
		    GE_TXBUF_SIZE, 0);
		if (error) {
			gfe_dmamem_free(sc, &txq->txq_desc_mem);
			free(txq, M_DEVBUF);
			GE_FUNC_EXIT(sc, "");
			return error;
		}
		sc->sc_txq[txprio] = txq;
	}

	txq->txq_descs =
	    (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
	txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
	txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;

	txq->txq_pendq.ifq_maxlen = 10;
	txq->txq_ei_gapcount = 0;
	txq->txq_nactive = 0;
	txq->txq_fi = 0;
	txq->txq_lo = 0;
	txq->txq_inptr = GE_TXBUF_SIZE;
	txq->txq_outptr = 0;
	for (i = 0, txd = txq->txq_descs,
	    addr = txq->txq_desc_busaddr + sizeof(*txd);
	    i < GE_TXDESC_MAX - 1;
	    i++, txd++, addr += sizeof(*txd)) {
		/*
		 * Update nxtptr to point to the next txd.
		 */
		txd->ed_cmdsts = 0;
		txd->ed_nxtptr = htogt32(addr);
	}
	txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
	    htogt32(txq->txq_desc_busaddr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
	    GE_TXMEM_SIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	switch (txprio) {
	case GE_TXPRIO_HI:
		txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
		txq->txq_epsrbits = ETH_EPSR_TxHigh;
		txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
		GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_LO:
		txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
		txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
		txq->txq_epsrbits = ETH_EPSR_TxLow;
		txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
		GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);
		break;

	case GE_TXPRIO_NONE:
		break;
	}
#if 0
	GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
	gt_write(sc->sc_dev.dv_parent, txq->txq_ectdp, txq->txq_desc_busaddr);
	GE_DPRINTF(sc, (")"));
#endif

	/*
	 * If we are restarting, there may be packets in the pending queue
	 * waiting to be enqueued.  Try enqueuing packets from both priority
	 * queues until the pending queue is empty or there is no room for
	 * them on the device.
	 */
	while (gfe_tx_enqueue(sc, txprio))
		continue;

	GE_FUNC_EXIT(sc, "");
	return 0;
}

void
gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
{
	struct gfe_txqueue * const txq = sc->sc_txq[txprio];

	GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
	if (txq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	if (!flush) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

	gfe_dmamem_free(sc, &txq->txq_desc_mem);
	gfe_dmamem_free(sc, &txq->txq_buf_mem);
	free(txq, M_DEVBUF);
	sc->sc_txq[txprio] = NULL;
	GE_FUNC_EXIT(sc, "-F");
}

void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow|ETH_IR_TxBufferLow);

	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}
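
/*
 * Interrupt handler: read and acknowledge the cause register up to
 * four times, dispatching to the receive and transmit service
 * routines, until no enabled cause bits remain set.
 */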
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	for (cnt = 0; cnt < 4; cnt++) {
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#x", cause));
		if (cause == 0)
			break;

		claim = 1;

		GE_WRITE(sc, EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		if (cause & ETH_IR_MIIPhySTC) {
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}

int
gfe_mii_mediachange (struct ifnet *ifp)
{
	struct gfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
gfe_mii_mediastatus (struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gfe_softc *sc = ifp->if_softc;

	if (sc->sc_flags & GE_PHYSTSCHG) {
		sc->sc_flags &= ~GE_PHYSTSCHG;
		mii_pollstat(&sc->sc_mii);
	}
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

int
gfe_mii_read (struct device *self, int phy, int reg)
{
	return gt_mii_read(self, self->dv_parent, phy, reg);
}

void
gfe_mii_write (struct device *self, int phy, int reg, int value)
{
	gt_mii_write(self, self->dv_parent, phy, reg, value);
}

void
gfe_mii_statchg (struct device *self)
{
	/* struct gfe_softc *sc = (struct gfe_softc *) self; */
	/* do nothing? */
}

int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;

	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	gfe_dmamem_free(sc, &sc->sc_hash_mem);
	sc->sc_hashtable = NULL;
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}
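
/*
 * Compute the hash table index the GT will use for the given Ethernet
 * address, mirroring the hardware's hashFunc0/hashFunc1 (selected by
 * the HM bit in the port configuration register).
 */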
int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	add0 = ((uint32_t) eaddr[5] <<  0) |
	    ((uint32_t) eaddr[4] <<  8) |
	    ((uint32_t) eaddr[3] << 16);

	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	add1 = ((uint32_t) eaddr[2] <<  0) |
	    ((uint32_t) eaddr[1] <<  8) |
	    ((uint32_t) eaddr[0] << 16);

	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15 bit Hash entry address.
	 * ethernetADD is a 48 bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e. MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 *   hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *	XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 *   hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
	} else {
#define	TRIBITFLIP	073516240	/* yes, it's in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 *   hashResult[08:00] = ethernetADD[06:14]
		 *	XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 *   hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#x)", result));
	}
	GE_FUNC_EXIT(sc, "");
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}
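
/*
 * Add or remove the hash table entry for the given address.  The table
 * is probed linearly from the hash index, mimicking the hardware's
 * 12-entry search; a skipped (HSH_S) entry may be reused in place.
 */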
int
gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
	enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint64_t he;
	uint64_t *maybe_he_p = NULL;
	int limit;
	int hash;
	int maybe_hash = 0;

	GE_FUNC_ENTER(sc, "gfe_hash_entry_op");

	hash = gfe_hash_compute(sc, eaddr);

	if (sc->sc_hashtable == NULL) {
		panic("%s:%d: hashtable == NULL!", sc->sc_dev.dv_xname,
		    __LINE__);
	}

	/*
	 * Assume we are going to insert, so create the hash entry we
	 * are going to insert.  We also use it to match entries we
	 * will be removing.
	 */
	he = ((uint64_t) eaddr[5] << 43) |
	    ((uint64_t) eaddr[4] << 35) |
	    ((uint64_t) eaddr[3] << 27) |
	    ((uint64_t) eaddr[2] << 19) |
	    ((uint64_t) eaddr[1] << 11) |
	    ((uint64_t) eaddr[0] <<  3) |
	    HSH_PRIO_INS(prio) | HSH_V | HSH_R;

	/*
	 * The GT will search up to 12 entries for a hit, so we must
	 * mimic that.
	 */
	hash &= sc->sc_hashmask / sizeof(he);
	for (limit = HSH_LIMIT; limit > 0 ; --limit) {
		/*
		 * Does the GT wrap at the end, stop at the end, or overrun
		 * the end?  Assume it wraps for now.  Stash a copy of the
		 * current hash entry.
		 */
		uint64_t *he_p = &sc->sc_hashtable[hash];
		uint64_t thishe = *he_p;

		/*
		 * If the hash entry isn't valid, that breaks the chain, and
		 * this entry is a good candidate for reuse.
		 */
		if ((thishe & HSH_V) == 0) {
			maybe_he_p = he_p;
			break;
		}

		/*
		 * If the hash entry has the same address we are looking for
		 * then ...  if we are removing and the skip bit is set, it's
		 * already been removed.  If we are adding and the skip bit is
		 * clear, then it's already added.  In either case, return
		 * EBUSY indicating the op has already been done.  Otherwise
		 * flip the skip bit and return 0.
		 */
		if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
			if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
			    ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
				return EBUSY;
			*he_p = thishe ^ HSH_S;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
			    hash * sizeof(he), sizeof(he),
			    BUS_DMASYNC_PREWRITE);
			GE_FUNC_EXIT(sc, "^");
			return 0;
		}

		/*
		 * If we haven't found a slot for the entry and this entry
		 * is currently being skipped, return this entry.
		 */
		if (maybe_he_p == NULL && (thishe & HSH_S)) {
			maybe_he_p = he_p;
			maybe_hash = hash;
		}

		hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
	}

	/*
	 * If we got here, then there was no entry to remove.
	 */
	if (op == GE_HASH_REMOVE) {
		GE_FUNC_EXIT(sc, "?");
		return ENOENT;
	}

	/*
	 * If we couldn't find a slot, return an error.
	 */
	if (maybe_he_p == NULL) {
		GE_FUNC_EXIT(sc, "!");
		return ENOSPC;
	}

	/* Update the entry. */
	*maybe_he_p = he;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "+");
	return 0;
}
int
gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm,
	u_long cmd)
{
	struct gfe_softc * const sc = ec->ec_if.if_softc;
	int error;
	enum gfe_hash_op op;
	enum gfe_rxprio prio;

	GE_FUNC_ENTER(sc, "hash_multichg");
	/*
	 * Is this a wildcard entry?  If so and it's being removed, recompute.
	 */
	if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
		if (cmd == SIOCDELMULTI) {
			GE_FUNC_EXIT(sc, "");
			return ENETRESET;
		}

		/*
		 * Switch in all-multicast mode.
		 */
		sc->sc_flags |= GE_ALLMULTI;
		if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
			sc->sc_pcr |= ETH_EPCR_PM;
			GE_WRITE(sc, EPCR, sc->sc_pcr);
			GE_FUNC_EXIT(sc, "");
			return 0;
		}
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}

	prio = GE_RXPRIO_MEDLO;
	op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);

	if (sc->sc_hashtable == NULL) {
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
	if (error == EBUSY) {
		printf("%s: multichg: tried to %s %s again\n",
		    sc->sc_dev.dv_xname,
		    cmd == SIOCDELMULTI ? "remove" : "add",
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOENT) {
		printf("%s: multichg: failed to remove %s: not in table\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOSPC) {
		printf("%s: multichg: failed to add %s: no space; "
		    "regenerating table\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}
	GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
	    sc->sc_dev.dv_xname,
	    cmd == SIOCDELMULTI ? "remove" : "add",
	    ether_sprintf(enm->enm_addrlo)));
	GE_FUNC_EXIT(sc, "");
	return 0;
}

/*
 * Rebuild the hash table from scratch: our own station address first
 * (at high priority), then every address on the multicast list.
 */
int
gfe_hash_fill(struct gfe_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	int error;

	GE_FUNC_ENTER(sc, "gfe_hash_fill");

	error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
	    LLADDR(sc->sc_ec.ec_if.if_sadl));
	if (error) {
		GE_FUNC_EXIT(sc, "!");
		return error;
	}

	sc->sc_flags &= ~GE_ALLMULTI;
	if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
		sc->sc_pcr &= ~ETH_EPCR_PM;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			sc->sc_flags |= GE_ALLMULTI;
			sc->sc_pcr |= ETH_EPCR_PM;
		} else {
			error = gfe_hash_entry_op(sc, GE_HASH_ADD,
			    GE_RXPRIO_MEDLO, enm->enm_addrlo);
			if (error == ENOSPC)
				break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}

	GE_FUNC_EXIT(sc, "");
	return error;
}

/*
 * Allocate DMA-safe memory for the address filter hash table and clear
 * it.  The table size (16KB or 256KB) depends on the hash size bit in
 * the port configuration register.
 */
int
gfe_hash_alloc(struct gfe_softc *sc)
{
	int error;

	GE_FUNC_ENTER(sc, "gfe_hash_alloc");
	sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1;
	error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1,
	    BUS_DMA_NOCACHE);
	if (error) {
		printf("%s: failed to allocate %d bytes for hash table: %d\n",
		    sc->sc_dev.dv_xname, sc->sc_hashmask + 1, error);
		GE_FUNC_EXIT(sc, "");
		return error;
	}
	sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva;
	memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
	    0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE);
	GE_FUNC_EXIT(sc, "");
	return 0;
}