1 /* $NetBSD: if_gfe.c,v 1.11 2003/06/12 19:18:02 scw Exp $ */ 2 3 /* 4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed for the NetBSD Project by 18 * Allegro Networks, Inc., and Wasabi Systems, Inc. 19 * 4. The name of Allegro Networks, Inc. may not be used to endorse 20 * or promote products derived from this software without specific prior 21 * written permission. 22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND 27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * if_gfe.c -- GT ethernet MAC driver 42 */ 43 44 #include "opt_inet.h" 45 #include "bpfilter.h" 46 47 #include <sys/param.h> 48 #include <sys/types.h> 49 #include <sys/inttypes.h> 50 #include <sys/queue.h> 51 52 #include <uvm/uvm_extern.h> 53 54 #include <sys/callout.h> 55 #include <sys/device.h> 56 #include <sys/errno.h> 57 #include <sys/ioctl.h> 58 #include <sys/mbuf.h> 59 #include <sys/socket.h> 60 61 #include <machine/bus.h> 62 63 #include <net/if.h> 64 #include <net/if_dl.h> 65 #include <net/if_ether.h> 66 #include <net/if_media.h> 67 68 #ifdef INET 69 #include <netinet/in.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 #if NBPFILTER > 0 73 #include <net/bpf.h> 74 #endif 75 76 #include <dev/mii/miivar.h> 77 78 #include <dev/marvell/gtintrreg.h> 79 #include <dev/marvell/gtethreg.h> 80 81 #include <dev/marvell/gtvar.h> 82 #include <dev/marvell/if_gfevar.h> 83 84 #define GE_READ(sc, reg) \ 85 bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg) 86 #define GE_WRITE(sc, reg, v) \ 87 bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v)) 88 89 #define GE_DEBUG 90 #if 0 91 #define GE_NOHASH 92 #define GE_NORX 93 #endif 94 95 #ifdef GE_DEBUG 96 #define GE_DPRINTF(sc, a) do \ 97 if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \ 98 printf a; \ 99 while (0) 100 #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func)) 101 #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]")) 102 #else 103 #define GE_DPRINTF(sc, a) do { } while (0) 104 #define GE_FUNC_ENTER(sc, func) do { } while (0) 105 #define GE_FUNC_EXIT(sc, str) do { } while (0) 106 #endif 107 enum gfe_whack_op { 108 GE_WHACK_START, GE_WHACK_RESTART, 109 GE_WHACK_CHANGE, GE_WHACK_STOP 110 }; 111 112 enum gfe_hash_op { 113 GE_HASH_ADD, GE_HASH_REMOVE, 114 }; 115 116 #if 1 117 #define htogt32(a) htobe32(a) 118 #define gt32toh(a) be32toh(a) 119 #else 120 #define htogt32(a) htole32(a) 121 #define gt32toh(a) le32toh(a) 122 #endif 123 124 #define GE_RXDSYNC(sc, rxq, n, ops) \ 125 bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \ 126 (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \ 127 (ops)) 128 #define GE_RXDPRESYNC(sc, rxq, n) \ 129 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 130 #define GE_RXDPOSTSYNC(sc, rxq, n) \ 131 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 132 133 #define GE_TXDSYNC(sc, txq, n, ops) \ 134 bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \ 135 (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \ 136 (ops)) 137 #define GE_TXDPRESYNC(sc, txq, n) \ 138 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 139 #define GE_TXDPOSTSYNC(sc, txq, n) \ 140 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 141 142 #define STATIC 143 144 STATIC int gfe_match (struct device *, struct cfdata *, void *); 145 STATIC void gfe_attach (struct device *, struct device *, void *); 146 147 STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, 148 size_t, int); 149 STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); 150 151 STATIC int gfe_ifioctl (struct ifnet *, u_long, caddr_t); 152 STATIC void gfe_ifstart (struct ifnet *); 153 STATIC void gfe_ifwatchdog (struct ifnet *); 154 155 STATIC int gfe_mii_mediachange (struct ifnet *); 156 STATIC void gfe_mii_mediastatus (struct ifnet *, struct ifmediareq *); 157 STATIC int gfe_mii_read (struct device *, int, int); 158 STATIC void gfe_mii_write (struct device *, int, int, int); 159 STATIC void gfe_mii_statchg (struct device *); 160 161 STATIC void gfe_tick(void *arg); 162 163 STATIC void gfe_tx_restart(void *); 164 STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); 165 STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); 166 STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); 167 STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); 168 STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); 169 170 STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); 171 STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); 172 STATIC int gfe_rx_prime(struct gfe_softc *); 173 STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); 174 STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); 175 STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); 176 177 STATIC int gfe_intr(void *); 178 179 STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); 180 181 STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); 182 STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, 183 enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); 184 STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, 185 u_long); 186 STATIC int gfe_hash_fill(struct gfe_softc *); 187 STATIC int gfe_hash_alloc(struct gfe_softc *); 188 189 /* Linkup to the rest of the kernel */ 190 CFATTACH_DECL(gfe, sizeof(struct gfe_softc), 191 gfe_match, gfe_attach, NULL, NULL); 192 193 extern struct cfdriver gfe_cd; 194 195 int 196 gfe_match(struct device *parent, struct cfdata *cf, void *aux) 197 { 198 struct gt_softc *gt = (struct gt_softc *) parent; 199 struct gt_attach_args *ga = aux; 200 uint8_t enaddr[6]; 201 202 if (!GT_ETHEROK(gt, ga, &gfe_cd)) 203 return 0; 204 205 if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0) 206 return 0; 207 208 if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 && 209 enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0) 210 return 0; 211 212 return 1; 213 } 214 215 /* 216 * Attach this instance, and then all the sub-devices 217 */ 218 void 219 gfe_attach(struct device *parent, struct device *self, void *aux) 220 { 221 struct gt_attach_args * const ga = aux; 222 struct gt_softc * const gt = (struct gt_softc *) parent; 223 struct gfe_softc * const sc = (struct gfe_softc *) self; 224 struct ifnet * const ifp = &sc->sc_ec.ec_if; 225 uint32_t data; 226 uint8_t enaddr[6]; 227 int phyaddr; 228 uint32_t sdcr; 229 230 GT_ETHERFOUND(gt, ga); 231 232 sc->sc_gt_memt = ga->ga_memt; 233 sc->sc_gt_memh = ga->ga_memh; 234 sc->sc_dmat = ga->ga_dmat; 235 sc->sc_macno = ga->ga_unit; 236 237 if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh, 238 ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) { 239 aprint_error(": failed to map registers\n"); 240 } 241 242 callout_init(&sc->sc_co); 243 244 data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR); 245 phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno); 246 247 gtget_macaddr(gt, sc->sc_macno, enaddr); 248 249 sc->sc_pcr = GE_READ(sc, EPCR); 250 sc->sc_pcxr = GE_READ(sc, EPCXR); 251 sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC; 252 253 aprint_normal(": address %s", ether_sprintf(enaddr)); 254 255 #if defined(DEBUG) 256 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr); 257 #endif 258 259 sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override; 260 if (sc->sc_dev.dv_cfdata->cf_flags & 1) { 261 aprint_normal(", phy %d (rmii)", phyaddr); 262 sc->sc_pcxr |= ETH_EPCXR_RMIIEn; 263 } else { 264 aprint_normal(", phy %d (mii)", phyaddr); 265 sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn; 266 } 267 sc->sc_pcxr &= ~(3 << 14); 268 sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14); 269 270 if (sc->sc_pcr & ETH_EPCR_EN) { 271 int tries = 1000; 272 /* 273 * Abort transmitter and receiver and wait for them to quiese 274 */ 275 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT); 276 do { 277 delay(100); 278 } while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT))); 279 } 280 281 sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF); 282 283 #if defined(DEBUG) 284 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr); 285 #endif 286 287 /* 288 * Now turn off the GT. If it didn't quiese, too ***ing bad. 289 */ 290 GE_WRITE(sc, EPCR, sc->sc_pcr); 291 GE_WRITE(sc, EIMR, sc->sc_intrmask); 292 sdcr = GE_READ(sc, ESDCR); 293 ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4); 294 sdcr |= ETH_ESDCR_RIFB; 295 GE_WRITE(sc, ESDCR, sdcr); 296 sc->sc_max_frame_length = 1536; 297 298 aprint_normal("\n"); 299 sc->sc_mii.mii_ifp = ifp; 300 sc->sc_mii.mii_readreg = gfe_mii_read; 301 sc->sc_mii.mii_writereg = gfe_mii_write; 302 sc->sc_mii.mii_statchg = gfe_mii_statchg; 303 304 ifmedia_init(&sc->sc_mii.mii_media, 0, gfe_mii_mediachange, 305 gfe_mii_mediastatus); 306 307 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr, 308 MII_OFFSET_ANY, MIIF_NOISOLATE); 309 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 310 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 311 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 312 } else { 313 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 314 } 315 316 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 317 ifp->if_softc = sc; 318 /* ifp->if_mowner = &sc->sc_mowner; */ 319 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 320 #if 0 321 ifp->if_flags |= IFF_DEBUG; 322 #endif 323 ifp->if_ioctl = gfe_ifioctl; 324 ifp->if_start = gfe_ifstart; 325 ifp->if_watchdog = gfe_ifwatchdog; 326 327 if_attach(ifp); 328 ether_ifattach(ifp, enaddr); 329 #if NBPFILTER > 0 330 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 331 #endif 332 #if NRND > 0 333 rnd_attach_source(&sc->sc_rnd_source, self->dv_xname, RND_TYPE_NET, 0); 334 #endif 335 intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET, 336 gfe_intr, sc); 337 } 338 339 int 340 gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, 341 size_t size, int flags) 342 { 343 int error = 0; 344 GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); 345 gdm->gdm_size = size; 346 gdm->gdm_maxsegs = maxsegs; 347 348 flags |= BUS_DMA_COHERENT; 349 350 error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, 351 gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, 352 BUS_DMA_NOWAIT); 353 if (error) 354 goto fail; 355 356 error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, 357 gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); 358 if (error) 359 goto fail; 360 361 error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, 362 gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map); 363 if (error) 364 goto fail; 365 366 error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, 367 gdm->gdm_size, NULL, BUS_DMA_NOWAIT); 368 if (error) 369 goto fail; 370 371 /* invalidate from cache */ 372 bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, 373 BUS_DMASYNC_PREREAD); 374 fail: 375 if (error) { 376 gfe_dmamem_free(sc, gdm); 377 GE_DPRINTF(sc, (":err=%d", error)); 378 } 379 GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x", 380 gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, 381 gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); 382 GE_FUNC_EXIT(sc, ""); 383 return error; 384 } 385 386 void 387 gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm) 388 { 389 GE_FUNC_ENTER(sc, "gfe_dmamem_free"); 390 if (gdm->gdm_map) 391 bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map); 392 if (gdm->gdm_kva) 393 bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size); 394 if (gdm->gdm_nsegs > 0) 395 bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs); 396 gdm->gdm_map = NULL; 397 gdm->gdm_kva = NULL; 398 gdm->gdm_nsegs = 0; 399 GE_FUNC_EXIT(sc, ""); 400 } 401 402 int 403 gfe_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 404 { 405 struct gfe_softc * const sc = ifp->if_softc; 406 struct ifreq *ifr = (struct ifreq *) data; 407 struct ifaddr *ifa = (struct ifaddr *) data; 408 int s, error = 0; 409 410 GE_FUNC_ENTER(sc, "gfe_ifioctl"); 411 s = splnet(); 412 413 switch (cmd) { 414 case SIOCSIFADDR: 415 ifp->if_flags |= IFF_UP; 416 switch (ifa->ifa_addr->sa_family) { 417 #ifdef INET 418 case AF_INET: 419 error = gfe_whack(sc, GE_WHACK_START); 420 if (error == 0) 421 arp_ifinit(ifp, ifa); 422 break; 423 #endif 424 default: 425 error = gfe_whack(sc, GE_WHACK_START); 426 break; 427 } 428 break; 429 430 case SIOCSIFFLAGS: 431 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { 432 case IFF_UP|IFF_RUNNING:/* active->active, update */ 433 error = gfe_whack(sc, GE_WHACK_CHANGE); 434 break; 435 case IFF_RUNNING: /* not up, so we stop */ 436 error = gfe_whack(sc, GE_WHACK_STOP); 437 break; 438 case IFF_UP: /* not running, so we start */ 439 error = gfe_whack(sc, GE_WHACK_START); 440 break; 441 case 0: /* idle->idle: do nothing */ 442 break; 443 } 444 break; 445 446 case SIOCADDMULTI: 447 case SIOCDELMULTI: 448 error = (cmd == SIOCADDMULTI) 449 ? ether_addmulti(ifr, &sc->sc_ec) 450 : ether_delmulti(ifr, &sc->sc_ec); 451 if (error == ENETRESET) { 452 if (ifp->if_flags & IFF_RUNNING) 453 error = gfe_whack(sc, GE_WHACK_CHANGE); 454 else 455 error = 0; 456 } 457 break; 458 459 case SIOCSIFMTU: 460 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { 461 error = EINVAL; 462 break; 463 } 464 ifp->if_mtu = ifr->ifr_mtu; 465 break; 466 467 case SIOCSIFMEDIA: 468 case SIOCGIFMEDIA: 469 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 470 break; 471 472 default: 473 error = EINVAL; 474 break; 475 } 476 splx(s); 477 GE_FUNC_EXIT(sc, ""); 478 return error; 479 } 480 481 void 482 gfe_ifstart(struct ifnet *ifp) 483 { 484 struct gfe_softc * const sc = ifp->if_softc; 485 struct mbuf *m; 486 487 GE_FUNC_ENTER(sc, "gfe_ifstart"); 488 489 if ((ifp->if_flags & IFF_RUNNING) == 0) { 490 GE_FUNC_EXIT(sc, "$"); 491 return; 492 } 493 494 if (sc->sc_txq[GE_TXPRIO_HI] == NULL) { 495 ifp->if_flags |= IFF_OACTIVE; 496 #if defined(DEBUG) || defined(DIAGNOSTIC) 497 printf("%s: ifstart: txq not yet created\n", ifp->if_xname); 498 #endif 499 GE_FUNC_EXIT(sc, ""); 500 return; 501 } 502 503 for (;;) { 504 IF_DEQUEUE(&ifp->if_snd, m); 505 if (m == NULL) { 506 ifp->if_flags &= ~IFF_OACTIVE; 507 GE_FUNC_EXIT(sc, ""); 508 return; 509 } 510 511 /* 512 * No space in the pending queue? try later. 513 */ 514 if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI]->txq_pendq)) 515 break; 516 517 /* 518 * Try to enqueue a mbuf to the device. If that fails, we 519 * can always try to map the next mbuf. 520 */ 521 IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI]->txq_pendq, m); 522 GE_DPRINTF(sc, (">")); 523 #ifndef GE_NOTX 524 (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI); 525 #endif 526 } 527 528 /* 529 * Attempt to queue the mbuf for send failed. 530 */ 531 IF_PREPEND(&ifp->if_snd, m); 532 ifp->if_flags |= IFF_OACTIVE; 533 GE_FUNC_EXIT(sc, "%%"); 534 } 535 536 void 537 gfe_ifwatchdog(struct ifnet *ifp) 538 { 539 struct gfe_softc * const sc = ifp->if_softc; 540 struct gfe_txqueue *txq; 541 542 GE_FUNC_ENTER(sc, "gfe_ifwatchdog"); 543 printf("%s: device timeout", sc->sc_dev.dv_xname); 544 if ((txq = sc->sc_txq[GE_TXPRIO_HI]) != NULL) { 545 uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]); 546 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 547 GE_TXDPOSTSYNC(sc, txq, curtxdnum); 548 printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ", 549 txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts, 550 txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts, 551 GE_READ(sc, EICR)); 552 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 553 GE_TXDPRESYNC(sc, txq, curtxdnum); 554 } 555 printf("\n"); 556 ifp->if_oerrors++; 557 (void) gfe_whack(sc, GE_WHACK_RESTART); 558 GE_FUNC_EXIT(sc, ""); 559 } 560 561 int 562 gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio) 563 { 564 struct gfe_rxqueue *rxq; 565 volatile struct gt_eth_desc *rxd; 566 const bus_dma_segment_t *ds; 567 int error; 568 int idx; 569 bus_addr_t nxtaddr; 570 bus_size_t boff; 571 572 GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc"); 573 GE_DPRINTF(sc, ("(%d)", rxprio)); 574 if (sc->sc_rxq[rxprio] != NULL) { 575 GE_FUNC_EXIT(sc, ""); 576 return 0; 577 } 578 579 rxq = (struct gfe_rxqueue *) malloc(sizeof(*rxq), M_DEVBUF, M_NOWAIT); 580 if (rxq == NULL) { 581 GE_FUNC_EXIT(sc, "!"); 582 return ENOMEM; 583 } 584 585 memset(rxq, 0, sizeof(*rxq)); 586 587 error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1, 588 GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE); 589 if (error) { 590 free(rxq, M_DEVBUF); 591 GE_FUNC_EXIT(sc, "!!"); 592 return error; 593 } 594 error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS, 595 GE_RXBUF_MEMSIZE, 0); 596 if (error) { 597 gfe_dmamem_free(sc, &rxq->rxq_desc_mem); 598 free(rxq, M_DEVBUF); 599 GE_FUNC_EXIT(sc, "!!!"); 600 return error; 601 } 602 603 memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_TXMEM_SIZE); 604 605 sc->sc_rxq[rxprio] = rxq; 606 rxq->rxq_descs = 607 (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva; 608 rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr; 609 rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva; 610 rxq->rxq_fi = 0; 611 rxq->rxq_active = GE_RXDESC_MAX; 612 for (idx = 0, rxd = rxq->rxq_descs, 613 boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs, 614 nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd); 615 idx < GE_RXDESC_MAX; 616 idx++, rxd++, nxtaddr += sizeof(*rxd)) { 617 rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16); 618 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 619 rxd->ed_bufptr = htogt32(ds->ds_addr + boff); 620 /* 621 * update the nxtptr to point to the next txd. 622 */ 623 if (idx == GE_RXDESC_MAX - 1) 624 nxtaddr = rxq->rxq_desc_busaddr; 625 rxd->ed_nxtptr = htogt32(nxtaddr); 626 boff += GE_RXBUF_SIZE; 627 if (boff == ds->ds_len) { 628 ds++; 629 boff = 0; 630 } 631 } 632 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0, 633 rxq->rxq_desc_mem.gdm_map->dm_mapsize, 634 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 635 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0, 636 rxq->rxq_buf_mem.gdm_map->dm_mapsize, 637 BUS_DMASYNC_PREREAD); 638 639 rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError; 640 switch (rxprio) { 641 case GE_RXPRIO_HI: 642 rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3; 643 rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno); 644 rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno); 645 break; 646 case GE_RXPRIO_MEDHI: 647 rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2; 648 rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno); 649 rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno); 650 break; 651 case GE_RXPRIO_MEDLO: 652 rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1; 653 rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno); 654 rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno); 655 break; 656 case GE_RXPRIO_LO: 657 rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0; 658 rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno); 659 rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno); 660 break; 661 } 662 GE_FUNC_EXIT(sc, ""); 663 return error; 664 } 665 666 void 667 gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio) 668 { 669 struct ifnet * const ifp = &sc->sc_ec.ec_if; 670 struct gfe_rxqueue * const rxq = sc->sc_rxq[rxprio]; 671 struct mbuf *m = rxq->rxq_curpkt; 672 673 GE_FUNC_ENTER(sc, "gfe_rx_get"); 674 GE_DPRINTF(sc, ("(%d)", rxprio)); 675 676 while (rxq->rxq_active > 0) { 677 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi]; 678 struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi]; 679 const struct ether_header *eh; 680 unsigned int cmdsts; 681 size_t buflen; 682 683 GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi); 684 cmdsts = gt32toh(rxd->ed_cmdsts); 685 GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts)); 686 rxq->rxq_cmdsts = cmdsts; 687 /* 688 * Sometimes the GE "forgets" to reset the ownership bit. 689 * But if the length has been rewritten, the packet is ours 690 * so pretend the O bit is set. 691 */ 692 buflen = gt32toh(rxd->ed_lencnt) & 0xffff; 693 if ((cmdsts & RX_CMD_O) && buflen == 0) { 694 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 695 break; 696 } 697 698 /* 699 * If this is not a single buffer packet with no errors 700 * or for some reason it's bigger than our frame size, 701 * ignore it and go to the next packet. 702 */ 703 if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) != 704 (RX_CMD_F|RX_CMD_L) || 705 buflen > sc->sc_max_frame_length) { 706 GE_DPRINTF(sc, ("!")); 707 --rxq->rxq_active; 708 ifp->if_ipackets++; 709 ifp->if_ierrors++; 710 goto give_it_back; 711 } 712 713 if (m == NULL) { 714 MGETHDR(m, M_DONTWAIT, MT_DATA); 715 if (m == NULL) { 716 GE_DPRINTF(sc, ("?")); 717 break; 718 } 719 } 720 if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) { 721 MCLGET(m, M_DONTWAIT); 722 if ((m->m_flags & M_EXT) == 0) { 723 GE_DPRINTF(sc, ("?")); 724 break; 725 } 726 } 727 m->m_data += 2; 728 m->m_len = 0; 729 m->m_pkthdr.len = 0; 730 m->m_pkthdr.rcvif = ifp; 731 rxq->rxq_cmdsts = cmdsts; 732 --rxq->rxq_active; 733 734 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 735 rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD); 736 737 KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0); 738 memcpy(m->m_data + m->m_len, rxb->rb_data, buflen); 739 m->m_len = buflen; 740 m->m_pkthdr.len = buflen; 741 m->m_flags |= M_HASFCS; 742 743 ifp->if_ipackets++; 744 #if NBPFILTER > 0 745 if (ifp->if_bpf != NULL) 746 bpf_mtap(ifp->if_bpf, m); 747 #endif 748 749 eh = (const struct ether_header *) m->m_data; 750 if ((ifp->if_flags & IFF_PROMISC) || 751 (rxq->rxq_cmdsts & RX_STS_M) == 0 || 752 (rxq->rxq_cmdsts & RX_STS_HE) || 753 (eh->ether_dhost[0] & 1) != 0 || 754 memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl), 755 ETHER_ADDR_LEN) == 0) { 756 (*ifp->if_input)(ifp, m); 757 m = NULL; 758 GE_DPRINTF(sc, (">")); 759 } else { 760 m->m_len = 0; 761 m->m_pkthdr.len = 0; 762 GE_DPRINTF(sc, ("+")); 763 } 764 rxq->rxq_cmdsts = 0; 765 766 give_it_back: 767 rxd->ed_lencnt &= ~0xffff; /* zero out length */ 768 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 769 #if 0 770 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", 771 rxq->rxq_fi, 772 ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1], 773 ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3])); 774 #endif 775 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 776 if (++rxq->rxq_fi == GE_RXDESC_MAX) 777 rxq->rxq_fi = 0; 778 rxq->rxq_active++; 779 } 780 rxq->rxq_curpkt = m; 781 GE_FUNC_EXIT(sc, ""); 782 } 783 784 uint32_t 785 gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask) 786 { 787 struct ifnet * const ifp = &sc->sc_ec.ec_if; 788 struct gfe_rxqueue *rxq; 789 uint32_t rxbits; 790 #define RXPRIO_DECODER 0xffffaa50 791 GE_FUNC_ENTER(sc, "gfe_rx_process"); 792 793 rxbits = ETH_IR_RxBuffer_GET(cause); 794 while (rxbits) { 795 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 796 GE_DPRINTF(sc, ("%1x", rxbits)); 797 rxbits &= ~(1 << rxprio); 798 gfe_rx_get(sc, rxprio); 799 } 800 801 rxbits = ETH_IR_RxError_GET(cause); 802 while (rxbits) { 803 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 804 uint32_t masks[(GE_RXDESC_MAX + 31) / 32]; 805 int idx; 806 rxbits &= ~(1 << rxprio); 807 rxq = sc->sc_rxq[rxprio]; 808 sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits); 809 intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits); 810 if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) { 811 sc->sc_tickflags |= GE_TICK_RX_RESTART; 812 callout_reset(&sc->sc_co, 1, gfe_tick, sc); 813 } 814 ifp->if_ierrors++; 815 GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n", 816 sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi)); 817 memset(masks, 0, sizeof(masks)); 818 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 819 0, rxq->rxq_desc_mem.gdm_size, 820 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 821 for (idx = 0; idx < GE_RXDESC_MAX; idx++) { 822 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx]; 823 824 if (RX_CMD_O & gt32toh(rxd->ed_cmdsts)) 825 masks[idx/32] |= 1 << (idx & 31); 826 } 827 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 828 0, rxq->rxq_desc_mem.gdm_size, 829 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 830 #if defined(DEBUG) 831 printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n", 832 sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi, 833 rxq->rxq_cmdsts, masks[0], masks[1]); 834 #endif 835 } 836 if ((intrmask & ETH_IR_RxBits) == 0) 837 intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError); 838 839 GE_FUNC_EXIT(sc, ""); 840 return intrmask; 841 } 842 843 int 844 gfe_rx_prime(struct gfe_softc *sc) 845 { 846 struct gfe_rxqueue *rxq; 847 int error; 848 849 GE_FUNC_ENTER(sc, "gfe_rx_prime"); 850 851 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI); 852 if (error) 853 goto bail; 854 rxq = sc->sc_rxq[GE_RXPRIO_HI]; 855 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 856 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr); 857 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr); 858 } 859 sc->sc_intrmask |= rxq->rxq_intrbits; 860 861 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI); 862 if (error) 863 goto bail; 864 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 865 rxq = sc->sc_rxq[GE_RXPRIO_MEDHI]; 866 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr); 867 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr); 868 sc->sc_intrmask |= rxq->rxq_intrbits; 869 } 870 871 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO); 872 if (error) 873 goto bail; 874 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 875 rxq = sc->sc_rxq[GE_RXPRIO_MEDLO]; 876 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr); 877 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr); 878 sc->sc_intrmask |= rxq->rxq_intrbits; 879 } 880 881 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO); 882 if (error) 883 goto bail; 884 if ((sc->sc_flags & GE_RXACTIVE) == 0) { 885 rxq = sc->sc_rxq[GE_RXPRIO_LO]; 886 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr); 887 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr); 888 sc->sc_intrmask |= rxq->rxq_intrbits; 889 } 890 891 bail: 892 GE_FUNC_EXIT(sc, ""); 893 return error; 894 } 895 896 void 897 gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio) 898 { 899 struct gfe_rxqueue *rxq = sc->sc_rxq[rxprio]; 900 GE_FUNC_ENTER(sc, "gfe_rx_cleanup"); 901 if (rxq == NULL) { 902 GE_FUNC_EXIT(sc, ""); 903 return; 904 } 905 906 if (rxq->rxq_curpkt) 907 m_freem(rxq->rxq_curpkt); 908 gfe_dmamem_free(sc, &rxq->rxq_desc_mem); 909 gfe_dmamem_free(sc, &rxq->rxq_buf_mem); 910 free(rxq, M_DEVBUF); 911 sc->sc_rxq[rxprio] = NULL; 912 GE_FUNC_EXIT(sc, ""); 913 } 914 915 void 916 gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 917 { 918 GE_FUNC_ENTER(sc, "gfe_rx_stop"); 919 sc->sc_flags &= ~GE_RXACTIVE; 920 sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError); 921 sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError); 922 GE_WRITE(sc, EIMR, sc->sc_intrmask); 923 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR); 924 do { 925 delay(10); 926 } while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR); 927 gfe_rx_cleanup(sc, GE_RXPRIO_HI); 928 gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI); 929 gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO); 930 gfe_rx_cleanup(sc, GE_RXPRIO_LO); 931 GE_FUNC_EXIT(sc, ""); 932 } 933 934 void 935 gfe_tick(void *arg) 936 { 937 struct gfe_softc * const sc = arg; 938 uint32_t intrmask; 939 unsigned int tickflags; 940 int s; 941 942 GE_FUNC_ENTER(sc, "gfe_tick"); 943 944 s = splnet(); 945 946 tickflags = sc->sc_tickflags; 947 sc->sc_tickflags = 0; 948 intrmask = sc->sc_intrmask; 949 if (tickflags & GE_TICK_TX_IFSTART) 950 gfe_ifstart(&sc->sc_ec.ec_if); 951 if (tickflags & GE_TICK_RX_RESTART) { 952 intrmask |= sc->sc_idlemask; 953 if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) { 954 struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_HI]; 955 rxq->rxq_fi = 0; 956 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr); 957 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr); 958 } 959 if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) { 960 struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_MEDHI]; 961 rxq->rxq_fi = 0; 962 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr); 963 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr); 964 } 965 if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) { 966 struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_MEDLO]; 967 rxq->rxq_fi = 0; 968 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr); 969 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr); 970 } 971 if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) { 972 struct gfe_rxqueue *rxq = sc->sc_rxq[GE_RXPRIO_LO]; 973 rxq->rxq_fi = 0; 974 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr); 975 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr); 976 } 977 sc->sc_idlemask = 0; 978 } 979 if (intrmask != sc->sc_intrmask) { 980 sc->sc_intrmask = intrmask; 981 GE_WRITE(sc, EIMR, sc->sc_intrmask); 982 } 983 gfe_intr(sc); 984 splx(s); 985 986 GE_FUNC_EXIT(sc, ""); 987 } 988 989 int 990 gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio) 991 { 992 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 993 struct ifnet * const ifp = &sc->sc_ec.ec_if; 994 struct gfe_txqueue * const txq = sc->sc_txq[txprio]; 995 volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo]; 996 uint32_t intrmask = sc->sc_intrmask; 997 size_t buflen; 998 struct mbuf *m; 999 1000 GE_FUNC_ENTER(sc, "gfe_tx_enqueue"); 1001 1002 /* 1003 * Anything in the pending queue to enqueue? if not, punt. 1004 * otherwise grab its dmamap. 1005 */ 1006 if ((m = txq->txq_pendq.ifq_head) == NULL) { 1007 GE_FUNC_EXIT(sc, "-"); 1008 return 0; 1009 } 1010 1011 /* 1012 * Have we [over]consumed our limit of descriptors? 1013 * Do we have enough free descriptors? 1014 */ 1015 if (GE_TXDESC_MAX == txq->txq_nactive + 2) { 1016 volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi]; 1017 uint32_t cmdsts; 1018 size_t pktlen; 1019 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1020 cmdsts = gt32toh(txd2->ed_cmdsts); 1021 if (cmdsts & TX_CMD_O) { 1022 int nextin; 1023 /* 1024 * Sometime the Discovery forgets to update the 1025 * last descriptor. See if we own the descriptor 1026 * after it (since we know we've turned that to 1027 * the discovery and if we owned it, the Discovery 1028 * gave it back). If we do, we know the Discovery 1029 * gave back this one but forgot to mark it as ours. 1030 */ 1031 nextin = txq->txq_fi + 1; 1032 if (nextin == GE_TXDESC_MAX) 1033 nextin = 0; 1034 GE_TXDPOSTSYNC(sc, txq, nextin); 1035 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1036 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1037 GE_TXDPRESYNC(sc, txq, nextin); 1038 GE_FUNC_EXIT(sc, "@"); 1039 return 0; 1040 } 1041 #ifdef DEBUG 1042 printf("%s: txenqueue: transmitter resynced at %d\n", 1043 sc->sc_dev.dv_xname, txq->txq_fi); 1044 #endif 1045 } 1046 if (++txq->txq_fi == GE_TXDESC_MAX) 1047 txq->txq_fi = 0; 1048 txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr; 1049 pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff; 1050 txq->txq_inptr += roundup(pktlen, dcache_line_size); 1051 txq->txq_nactive--; 1052 1053 /* statistics */ 1054 ifp->if_opackets++; 1055 if (cmdsts & TX_STS_ES) 1056 ifp->if_oerrors++; 1057 GE_DPRINTF(sc, ("%%")); 1058 } 1059 1060 buflen = roundup(m->m_pkthdr.len, dcache_line_size); 1061 1062 /* 1063 * If this packet would wrap around the end of the buffer, reset back 1064 * to the beginning. 1065 */ 1066 if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) { 1067 txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr; 1068 txq->txq_outptr = 0; 1069 } 1070 1071 /* 1072 * Make sure the output packet doesn't run over the beginning of 1073 * what we've already given the GT. 1074 */ 1075 if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr && 1076 txq->txq_outptr + buflen > txq->txq_inptr) { 1077 intrmask |= txq->txq_intrbits & 1078 (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow); 1079 if (sc->sc_intrmask != intrmask) { 1080 sc->sc_intrmask = intrmask; 1081 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1082 } 1083 GE_FUNC_EXIT(sc, "#"); 1084 return 0; 1085 } 1086 1087 /* 1088 * The end-of-list descriptor we put on last time is the starting point 1089 * for this packet. The GT is supposed to terminate list processing on 1090 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor 1091 * must terminate the list. 1092 */ 1093 intrmask = sc->sc_intrmask; 1094 1095 m_copydata(m, 0, m->m_pkthdr.len, 1096 txq->txq_buf_mem.gdm_kva + txq->txq_outptr); 1097 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1098 txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE); 1099 txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr); 1100 txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16); 1101 GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1102 1103 /* 1104 * Request a buffer interrupt every 2/3 of the way thru the transmit 1105 * buffer. 1106 */ 1107 txq->txq_ei_gapcount += buflen; 1108 if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) { 1109 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI); 1110 txq->txq_ei_gapcount = 0; 1111 } else { 1112 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST); 1113 } 1114 #if 0 1115 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo, 1116 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1117 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1118 #endif 1119 GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1120 1121 txq->txq_outptr += buflen; 1122 /* 1123 * Tell the SDMA engine to "Fetch!" 1124 */ 1125 GE_WRITE(sc, ESDCMR, 1126 txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL)); 1127 1128 GE_DPRINTF(sc, ("(%d)", txq->txq_lo)); 1129 1130 /* 1131 * Update the last out appropriately. 1132 */ 1133 txq->txq_nactive++; 1134 if (++txq->txq_lo == GE_TXDESC_MAX) 1135 txq->txq_lo = 0; 1136 1137 /* 1138 * Move mbuf from the pending queue to the snd queue. 1139 */ 1140 IF_DEQUEUE(&txq->txq_pendq, m); 1141 #if NBPFILTER > 0 1142 if (ifp->if_bpf != NULL) 1143 bpf_mtap(ifp->if_bpf, m); 1144 #endif 1145 m_freem(m); 1146 ifp->if_flags &= ~IFF_OACTIVE; 1147 1148 /* 1149 * Since we have put an item into the packet queue, we now want 1150 * an interrupt when the transmit queue finishes processing the 1151 * list. But only update the mask if needs changing. 1152 */ 1153 intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow); 1154 if (sc->sc_intrmask != intrmask) { 1155 sc->sc_intrmask = intrmask; 1156 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1157 } 1158 if (ifp->if_timer == 0) 1159 ifp->if_timer = 5; 1160 GE_FUNC_EXIT(sc, "*"); 1161 return 1; 1162 } 1163 1164 uint32_t 1165 gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask) 1166 { 1167 struct gfe_txqueue * const txq = sc->sc_txq[txprio]; 1168 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1169 1170 GE_FUNC_ENTER(sc, "gfe_tx_done"); 1171 1172 if (txq == NULL) { 1173 GE_FUNC_EXIT(sc, ""); 1174 return intrmask; 1175 } 1176 1177 while (txq->txq_nactive > 0) { 1178 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 1179 volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi]; 1180 uint32_t cmdsts; 1181 size_t pktlen; 1182 1183 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1184 if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) { 1185 int nextin; 1186 1187 if (txq->txq_nactive == 1) { 1188 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1189 GE_FUNC_EXIT(sc, ""); 1190 return intrmask; 1191 } 1192 /* 1193 * Sometimes the Discovery forgets to update the 1194 * ownership bit in the descriptor. See if we own the 1195 * descriptor after it (since we know we've turned 1196 * that to the Discovery and if we own it now then the 1197 * Discovery gave it back). If we do, we know the 1198 * Discovery gave back this one but forgot to mark it 1199 * as ours. 1200 */ 1201 nextin = txq->txq_fi + 1; 1202 if (nextin == GE_TXDESC_MAX) 1203 nextin = 0; 1204 GE_TXDPOSTSYNC(sc, txq, nextin); 1205 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1206 GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1207 GE_TXDPRESYNC(sc, txq, nextin); 1208 GE_FUNC_EXIT(sc, ""); 1209 return intrmask; 1210 } 1211 #ifdef DEBUG 1212 printf("%s: txdone: transmitter resynced at %d\n", 1213 sc->sc_dev.dv_xname, txq->txq_fi); 1214 #endif 1215 } 1216 #if 0 1217 GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)", 1218 txq->txq_lo, 1219 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1220 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1221 #endif 1222 GE_DPRINTF(sc, ("(%d)", txq->txq_fi)); 1223 if (++txq->txq_fi == GE_TXDESC_MAX) 1224 txq->txq_fi = 0; 1225 txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr; 1226 pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff; 1227 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1228 txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE); 1229 txq->txq_inptr += roundup(pktlen, dcache_line_size); 1230 1231 /* statistics */ 1232 ifp->if_opackets++; 1233 if (cmdsts & TX_STS_ES) 1234 ifp->if_oerrors++; 1235 1236 /* txd->ed_bufptr = 0; */ 1237 1238 ifp->if_timer = 5; 1239 --txq->txq_nactive; 1240 } 1241 if (txq->txq_nactive != 0) 1242 panic("%s: transmit fifo%d empty but active count (%d) > 0!", 1243 sc->sc_dev.dv_xname, txprio, txq->txq_nactive); 1244 ifp->if_timer = 0; 1245 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow)); 1246 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow)); 1247 GE_FUNC_EXIT(sc, ""); 1248 return intrmask; 1249 } 1250 1251 int 1252 gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio) 1253 { 1254 struct gfe_txqueue *txq; 1255 volatile struct gt_eth_desc *txd; 1256 unsigned int i; 1257 bus_addr_t addr; 1258 1259 GE_FUNC_ENTER(sc, "gfe_tx_start"); 1260 1261 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| 1262 ETH_IR_TxEndLow |ETH_IR_TxBufferLow); 1263 1264 if ((txq = sc->sc_txq[txprio]) == NULL) { 1265 int error; 1266 txq = (struct gfe_txqueue *) malloc(sizeof(*txq), 1267 M_DEVBUF, M_NOWAIT); 1268 if (txq == NULL) { 1269 GE_FUNC_EXIT(sc, ""); 1270 return ENOMEM; 1271 } 1272 memset(txq, 0, sizeof(*txq)); 1273 error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1, 1274 GE_TXMEM_SIZE, BUS_DMA_NOCACHE); 1275 if (error) { 1276 free(txq, M_DEVBUF); 1277 GE_FUNC_EXIT(sc, ""); 1278 return error; 1279 } 1280 error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, 1281 GE_TXBUF_SIZE, 0); 1282 if (error) { 1283 gfe_dmamem_free(sc, &txq->txq_desc_mem); 1284 free(txq, M_DEVBUF); 1285 GE_FUNC_EXIT(sc, ""); 1286 return error; 1287 } 1288 sc->sc_txq[txprio] = txq; 1289 } 1290 1291 txq->txq_descs = 1292 (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva; 1293 txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr; 1294 txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr; 1295 1296 txq->txq_pendq.ifq_maxlen = 10; 1297 txq->txq_ei_gapcount = 0; 1298 txq->txq_nactive = 0; 1299 txq->txq_fi = 0; 1300 txq->txq_lo = 0; 1301 txq->txq_inptr = GE_TXBUF_SIZE; 1302 txq->txq_outptr = 0; 1303 for (i = 0, txd = txq->txq_descs, 1304 addr = txq->txq_desc_busaddr + sizeof(*txd); 1305 i < GE_TXDESC_MAX - 1; 1306 i++, txd++, addr += sizeof(*txd)) { 1307 /* 1308 * update the nxtptr to point to the next txd. 1309 */ 1310 txd->ed_cmdsts = 0; 1311 txd->ed_nxtptr = htogt32(addr); 1312 } 1313 txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr = 1314 htogt32(txq->txq_desc_busaddr); 1315 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0, 1316 GE_TXMEM_SIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1317 1318 switch (txprio) { 1319 case GE_TXPRIO_HI: 1320 txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh; 1321 txq->txq_esdcmrbits = ETH_ESDCMR_TXDH; 1322 txq->txq_epsrbits = ETH_EPSR_TxHigh; 1323 txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno); 1324 GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr); 1325 break; 1326 1327 case GE_TXPRIO_LO: 1328 txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow; 1329 txq->txq_esdcmrbits = ETH_ESDCMR_TXDL; 1330 txq->txq_epsrbits = ETH_EPSR_TxLow; 1331 txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno); 1332 GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr); 1333 break; 1334 1335 case GE_TXPRIO_NONE: 1336 break; 1337 } 1338 #if 0 1339 GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp)); 1340 gt_write(sc->sc_dev.dv_parent, txq->txq_ectdp, txq->txq_desc_busaddr); 1341 GE_DPRINTF(sc, (")")); 1342 #endif 1343 1344 /* 1345 * If we are restarting, there may be packets in the pending queue 1346 * waiting to be enqueued. Try enqueuing packets from both priority 1347 * queues until the pending queue is empty or there no room for them 1348 * on the device. 1349 */ 1350 while (gfe_tx_enqueue(sc, txprio)) 1351 continue; 1352 1353 GE_FUNC_EXIT(sc, ""); 1354 return 0; 1355 } 1356 1357 void 1358 gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush) 1359 { 1360 struct gfe_txqueue * const txq = sc->sc_txq[txprio]; 1361 1362 GE_FUNC_ENTER(sc, "gfe_tx_cleanup"); 1363 if (txq == NULL) { 1364 GE_FUNC_EXIT(sc, ""); 1365 return; 1366 } 1367 1368 if (!flush) { 1369 GE_FUNC_EXIT(sc, ""); 1370 return; 1371 } 1372 1373 gfe_dmamem_free(sc, &txq->txq_desc_mem); 1374 gfe_dmamem_free(sc, &txq->txq_buf_mem); 1375 free(txq, M_DEVBUF); 1376 sc->sc_txq[txprio] = NULL; 1377 GE_FUNC_EXIT(sc, "-F"); 1378 } 1379 1380 void 1381 gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 1382 { 1383 GE_FUNC_ENTER(sc, "gfe_tx_stop"); 1384 1385 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL); 1386 1387 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask); 1388 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask); 1389 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| 1390 ETH_IR_TxEndLow |ETH_IR_TxBufferLow); 1391 1392 gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP); 1393 gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP); 1394 1395 sc->sc_ec.ec_if.if_timer = 0; 1396 GE_FUNC_EXIT(sc, ""); 1397 } 1398 1399 int 1400 gfe_intr(void *arg) 1401 { 1402 struct gfe_softc * const sc = arg; 1403 uint32_t cause; 1404 uint32_t intrmask = sc->sc_intrmask; 1405 int claim = 0; 1406 int cnt; 1407 1408 GE_FUNC_ENTER(sc, "gfe_intr"); 1409 1410 for (cnt = 0; cnt < 4; cnt++) { 1411 if (sc->sc_intrmask != intrmask) { 1412 sc->sc_intrmask = intrmask; 1413 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1414 } 1415 cause = GE_READ(sc, EICR); 1416 cause &= sc->sc_intrmask; 1417 GE_DPRINTF(sc, (".%#x", cause)); 1418 if (cause == 0) 1419 break; 1420 1421 claim = 1; 1422 1423 GE_WRITE(sc, EICR, ~cause); 1424 #ifndef GE_NORX 1425 if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError)) 1426 intrmask = gfe_rx_process(sc, cause, intrmask); 1427 #endif 1428 1429 #ifndef GE_NOTX 1430 if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh)) 1431 intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask); 1432 if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow)) 1433 intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask); 1434 #endif 1435 if (cause & ETH_IR_MIIPhySTC) { 1436 sc->sc_flags |= GE_PHYSTSCHG; 1437 /* intrmask &= ~ETH_IR_MIIPhySTC; */ 1438 } 1439 } 1440 1441 GE_FUNC_EXIT(sc, ""); 1442 return claim; 1443 } 1444 1445 int 1446 gfe_mii_mediachange (struct ifnet *ifp) 1447 { 1448 struct gfe_softc *sc = ifp->if_softc; 1449 1450 if (ifp->if_flags & IFF_UP) 1451 mii_mediachg(&sc->sc_mii); 1452 1453 return (0); 1454 } 1455 void 1456 gfe_mii_mediastatus (struct ifnet *ifp, struct ifmediareq *ifmr) 1457 { 1458 struct gfe_softc *sc = ifp->if_softc; 1459 1460 if (sc->sc_flags & GE_PHYSTSCHG) { 1461 sc->sc_flags &= ~GE_PHYSTSCHG; 1462 mii_pollstat(&sc->sc_mii); 1463 } 1464 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1465 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1466 } 1467 1468 int 1469 gfe_mii_read (struct device *self, int phy, int reg) 1470 { 1471 return gt_mii_read(self, self->dv_parent, phy, reg); 1472 } 1473 1474 void 1475 gfe_mii_write (struct device *self, int phy, int reg, int value) 1476 { 1477 gt_mii_write(self, self->dv_parent, phy, reg, value); 1478 } 1479 1480 void 1481 gfe_mii_statchg (struct device *self) 1482 { 1483 /* struct gfe_softc *sc = (struct gfe_softc *) self; */ 1484 /* do nothing? */ 1485 } 1486 1487 int 1488 gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op) 1489 { 1490 int error = 0; 1491 GE_FUNC_ENTER(sc, "gfe_whack"); 1492 1493 switch (op) { 1494 case GE_WHACK_RESTART: 1495 #ifndef GE_NOTX 1496 gfe_tx_stop(sc, op); 1497 #endif 1498 /* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */ 1499 /* FALLTHROUGH */ 1500 case GE_WHACK_START: 1501 #ifndef GE_NOHASH 1502 if (error == 0 && sc->sc_hashtable == NULL) { 1503 error = gfe_hash_alloc(sc); 1504 if (error) 1505 break; 1506 } 1507 if (op != GE_WHACK_RESTART) 1508 gfe_hash_fill(sc); 1509 #endif 1510 #ifndef GE_NORX 1511 if (op != GE_WHACK_RESTART) { 1512 error = gfe_rx_prime(sc); 1513 if (error) 1514 break; 1515 } 1516 #endif 1517 #ifndef GE_NOTX 1518 error = gfe_tx_start(sc, GE_TXPRIO_HI); 1519 if (error) 1520 break; 1521 #endif 1522 sc->sc_ec.ec_if.if_flags |= IFF_RUNNING; 1523 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN); 1524 GE_WRITE(sc, EPCXR, sc->sc_pcxr); 1525 GE_WRITE(sc, EICR, 0); 1526 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1527 #ifndef GE_NOHASH 1528 GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr); 1529 #endif 1530 #ifndef GE_NORX 1531 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD); 1532 sc->sc_flags |= GE_RXACTIVE; 1533 #endif 1534 /* FALLTHROUGH */ 1535 case GE_WHACK_CHANGE: 1536 GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)", 1537 GE_READ(sc, EPCR), GE_READ(sc, EIMR))); 1538 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN); 1539 GE_WRITE(sc, EIMR, sc->sc_intrmask); 1540 gfe_ifstart(&sc->sc_ec.ec_if); 1541 GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)", 1542 GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1))); 1543 GE_FUNC_EXIT(sc, ""); 1544 return error; 1545 case GE_WHACK_STOP: 1546 break; 1547 } 1548 1549 #ifdef GE_DEBUG 1550 if (error) 1551 GE_DPRINTF(sc, (" failed: %d\n", error)); 1552 #endif 1553 GE_WRITE(sc, EPCR, sc->sc_pcr); 1554 GE_WRITE(sc, EIMR, 0); 1555 sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; 1556 #ifndef GE_NOTX 1557 gfe_tx_stop(sc, GE_WHACK_STOP); 1558 #endif 1559 #ifndef GE_NORX 1560 gfe_rx_stop(sc, GE_WHACK_STOP); 1561 #endif 1562 #ifndef GE_NOHASH 1563 gfe_dmamem_free(sc, &sc->sc_hash_mem); 1564 sc->sc_hashtable = NULL; 1565 #endif 1566 1567 GE_FUNC_EXIT(sc, ""); 1568 return error; 1569 } 1570 1571 int 1572 gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN]) 1573 { 1574 uint32_t w0, add0, add1; 1575 uint32_t result; 1576 1577 GE_FUNC_ENTER(sc, "gfe_hash_compute"); 1578 add0 = ((uint32_t) eaddr[5] << 0) | 1579 ((uint32_t) eaddr[4] << 8) | 1580 ((uint32_t) eaddr[3] << 16); 1581 1582 add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4); 1583 add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2); 1584 add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1); 1585 1586 add1 = ((uint32_t) eaddr[2] << 0) | 1587 ((uint32_t) eaddr[1] << 8) | 1588 ((uint32_t) eaddr[0] << 16); 1589 1590 add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4); 1591 add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2); 1592 add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1); 1593 1594 GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr))); 1595 /* 1596 * hashResult is the 15 bits Hash entry address. 1597 * ethernetADD is a 48 bit number, which is derived from the Ethernet 1598 * MAC address, by nibble swapping in every byte (i.e MAC address 1599 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb). 1600 */ 1601 1602 if ((sc->sc_pcr & ETH_EPCR_HM) == 0) { 1603 /* 1604 * hashResult[14:0] = hashFunc0(ethernetADD[47:0]) 1605 * 1606 * hashFunc0 calculates the hashResult in the following manner: 1607 * hashResult[ 8:0] = ethernetADD[14:8,1,0] 1608 * XOR ethernetADD[23:15] XOR ethernetADD[32:24] 1609 */ 1610 result = (add0 & 3) | ((add0 >> 6) & ~3); 1611 result ^= (add0 >> 15) ^ (add1 >> 0); 1612 result &= 0x1ff; 1613 /* 1614 * hashResult[14:9] = ethernetADD[7:2] 1615 */ 1616 result |= (add0 & ~3) << 7; /* excess bits will be masked */ 1617 GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff)); 1618 } else { 1619 #define TRIBITFLIP 073516240 /* yes its in octal */ 1620 /* 1621 * hashResult[14:0] = hashFunc1(ethernetADD[47:0]) 1622 * 1623 * hashFunc1 calculates the hashResult in the following manner: 1624 * hashResult[08:00] = ethernetADD[06:14] 1625 * XOR ethernetADD[15:23] XOR ethernetADD[24:32] 1626 */ 1627 w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff; 1628 /* 1629 * Now bitswap those 9 bits 1630 */ 1631 result = 0; 1632 result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6; 1633 result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3; 1634 result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0; 1635 1636 /* 1637 * hashResult[14:09] = ethernetADD[00:05] 1638 */ 1639 result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12; 1640 result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9; 1641 GE_DPRINTF(sc, ("1(%#x)", result)); 1642 } 1643 GE_FUNC_EXIT(sc, ""); 1644 return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff); 1645 } 1646 1647 int 1648 gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op, 1649 enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN]) 1650 { 1651 uint64_t he; 1652 uint64_t *maybe_he_p = NULL; 1653 int limit; 1654 int hash; 1655 int maybe_hash = 0; 1656 1657 GE_FUNC_ENTER(sc, "gfe_hash_entry_op"); 1658 1659 hash = gfe_hash_compute(sc, eaddr); 1660 1661 if (sc->sc_hashtable == NULL) { 1662 panic("%s:%d: hashtable == NULL!", sc->sc_dev.dv_xname, 1663 __LINE__); 1664 } 1665 1666 /* 1667 * Assume we are going to insert so create the hash entry we 1668 * are going to insert. We also use it to match entries we 1669 * will be removing. 1670 */ 1671 he = ((uint64_t) eaddr[5] << 43) | 1672 ((uint64_t) eaddr[4] << 35) | 1673 ((uint64_t) eaddr[3] << 27) | 1674 ((uint64_t) eaddr[2] << 19) | 1675 ((uint64_t) eaddr[1] << 11) | 1676 ((uint64_t) eaddr[0] << 3) | 1677 HSH_PRIO_INS(prio) | HSH_V | HSH_R; 1678 1679 /* 1680 * The GT will search upto 12 entries for a hit, so we must mimic that. 1681 */ 1682 hash &= sc->sc_hashmask / sizeof(he); 1683 for (limit = HSH_LIMIT; limit > 0 ; --limit) { 1684 /* 1685 * Does the GT wrap at the end, stop at the, or overrun the 1686 * end? Assume it wraps for now. Stash a copy of the 1687 * current hash entry. 1688 */ 1689 uint64_t *he_p = &sc->sc_hashtable[hash]; 1690 uint64_t thishe = *he_p; 1691 1692 /* 1693 * If the hash entry isn't valid, that break the chain. And 1694 * this entry a good candidate for reuse. 1695 */ 1696 if ((thishe & HSH_V) == 0) { 1697 maybe_he_p = he_p; 1698 break; 1699 } 1700 1701 /* 1702 * If the hash entry has the same address we are looking for 1703 * then ... if we are removing and the skip bit is set, its 1704 * already been removed. if are adding and the skip bit is 1705 * clear, then its already added. In either return EBUSY 1706 * indicating the op has already been done. Otherwise flip 1707 * the skip bit and return 0. 1708 */ 1709 if (((he ^ thishe) & HSH_ADDR_MASK) == 0) { 1710 if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) || 1711 ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0)) 1712 return EBUSY; 1713 *he_p = thishe ^ HSH_S; 1714 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1715 hash * sizeof(he), sizeof(he), 1716 BUS_DMASYNC_PREWRITE); 1717 GE_FUNC_EXIT(sc, "^"); 1718 return 0; 1719 } 1720 1721 /* 1722 * If we haven't found a slot for the entry and this entry 1723 * is currently being skipped, return this entry. 1724 */ 1725 if (maybe_he_p == NULL && (thishe & HSH_S)) { 1726 maybe_he_p = he_p; 1727 maybe_hash = hash; 1728 } 1729 1730 hash = (hash + 1) & (sc->sc_hashmask / sizeof(he)); 1731 } 1732 1733 /* 1734 * If we got here, then there was no entry to remove. 1735 */ 1736 if (op == GE_HASH_REMOVE) { 1737 GE_FUNC_EXIT(sc, "?"); 1738 return ENOENT; 1739 } 1740 1741 /* 1742 * If we couldn't find a slot, return an error. 1743 */ 1744 if (maybe_he_p == NULL) { 1745 GE_FUNC_EXIT(sc, "!"); 1746 return ENOSPC; 1747 } 1748 1749 /* Update the entry. 1750 */ 1751 *maybe_he_p = he; 1752 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1753 maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE); 1754 GE_FUNC_EXIT(sc, "+"); 1755 return 0; 1756 } 1757 1758 int 1759 gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd) 1760 { 1761 struct gfe_softc * const sc = ec->ec_if.if_softc; 1762 int error; 1763 enum gfe_hash_op op; 1764 enum gfe_rxprio prio; 1765 1766 GE_FUNC_ENTER(sc, "hash_multichg"); 1767 /* 1768 * Is this a wildcard entry? If so and its being removed, recompute. 1769 */ 1770 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1771 if (cmd == SIOCDELMULTI) { 1772 GE_FUNC_EXIT(sc, ""); 1773 return ENETRESET; 1774 } 1775 1776 /* 1777 * Switch in 1778 */ 1779 sc->sc_flags |= GE_ALLMULTI; 1780 if ((sc->sc_pcr & ETH_EPCR_PM) == 0) { 1781 sc->sc_pcr |= ETH_EPCR_PM; 1782 GE_WRITE(sc, EPCR, sc->sc_pcr); 1783 GE_FUNC_EXIT(sc, ""); 1784 return 0; 1785 } 1786 GE_FUNC_EXIT(sc, ""); 1787 return ENETRESET; 1788 } 1789 1790 prio = GE_RXPRIO_MEDLO; 1791 op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD); 1792 1793 if (sc->sc_hashtable == NULL) { 1794 GE_FUNC_EXIT(sc, ""); 1795 return 0; 1796 } 1797 1798 error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo); 1799 if (error == EBUSY) { 1800 printf("%s: multichg: tried to %s %s again\n", 1801 sc->sc_dev.dv_xname, 1802 cmd == SIOCDELMULTI ? "remove" : "add", 1803 ether_sprintf(enm->enm_addrlo)); 1804 GE_FUNC_EXIT(sc, ""); 1805 return 0; 1806 } 1807 1808 if (error == ENOENT) { 1809 printf("%s: multichg: failed to remove %s: not in table\n", 1810 sc->sc_dev.dv_xname, 1811 ether_sprintf(enm->enm_addrlo)); 1812 GE_FUNC_EXIT(sc, ""); 1813 return 0; 1814 } 1815 1816 if (error == ENOSPC) { 1817 printf("%s: multichg: failed to add %s: no space; regenerating table\n", 1818 sc->sc_dev.dv_xname, 1819 ether_sprintf(enm->enm_addrlo)); 1820 GE_FUNC_EXIT(sc, ""); 1821 return ENETRESET; 1822 } 1823 GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n", 1824 sc->sc_dev.dv_xname, 1825 cmd == SIOCDELMULTI ? "remove" : "add", 1826 ether_sprintf(enm->enm_addrlo))); 1827 GE_FUNC_EXIT(sc, ""); 1828 return 0; 1829 } 1830 1831 int 1832 gfe_hash_fill(struct gfe_softc *sc) 1833 { 1834 struct ether_multistep step; 1835 struct ether_multi *enm; 1836 int error; 1837 1838 GE_FUNC_ENTER(sc, "gfe_hash_fill"); 1839 1840 error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, 1841 LLADDR(sc->sc_ec.ec_if.if_sadl)); 1842 if (error) 1843 GE_FUNC_EXIT(sc, "!"); 1844 return error; 1845 1846 sc->sc_flags &= ~GE_ALLMULTI; 1847 if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) 1848 sc->sc_pcr &= ~ETH_EPCR_PM; 1849 ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); 1850 while (enm != NULL) { 1851 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1852 sc->sc_flags |= GE_ALLMULTI; 1853 sc->sc_pcr |= ETH_EPCR_PM; 1854 } else { 1855 error = gfe_hash_entry_op(sc, GE_HASH_ADD, 1856 GE_RXPRIO_MEDLO, enm->enm_addrlo); 1857 if (error == ENOSPC) 1858 break; 1859 } 1860 ETHER_NEXT_MULTI(step, enm); 1861 } 1862 1863 GE_FUNC_EXIT(sc, ""); 1864 return error; 1865 } 1866 1867 int 1868 gfe_hash_alloc(struct gfe_softc *sc) 1869 { 1870 int error; 1871 GE_FUNC_ENTER(sc, "gfe_hash_alloc"); 1872 sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; 1873 error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, 1874 BUS_DMA_NOCACHE); 1875 if (error) { 1876 printf("%s: failed to allocate %d bytes for hash table: %d\n", 1877 sc->sc_dev.dv_xname, sc->sc_hashmask + 1, error); 1878 GE_FUNC_EXIT(sc, ""); 1879 return error; 1880 } 1881 sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; 1882 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); 1883 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1884 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); 1885 GE_FUNC_EXIT(sc, ""); 1886 return 0; 1887 } 1888