1 /* $Id: if_ae.c,v 1.9 2007/10/17 19:55:35 garbled Exp $ */ 2 /*- 3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center. 4 * Copyright (c) 2006 Garrett D'Amore. 5 * All rights reserved. 6 * 7 * This code was written by Garrett D'Amore for the Champaign-Urbana 8 * Community Wireless Network Project. 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 3. All advertising materials mentioning features or use of this 20 * software must display the following acknowledgements: 21 * This product includes software developed by the Urbana-Champaign 22 * Independent Media Center. 23 * This product includes software developed by Garrett D'Amore. 24 * 4. Urbana-Champaign Independent Media Center's name and Garrett 25 * D'Amore's name may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT 29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT 33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 */ 42 /*- 43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc. 44 * All rights reserved. 45 * 46 * This code is derived from software contributed to The NetBSD Foundation 47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 48 * NASA Ames Research Center; and by Charles M. Hannum. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. All advertising materials mentioning features or use of this software 59 * must display the following acknowledgement: 60 * This product includes software developed by the NetBSD 61 * Foundation, Inc. and its contributors. 62 * 4. Neither the name of The NetBSD Foundation nor the names of its 63 * contributors may be used to endorse or promote products derived 64 * from this software without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the onboard ethernet MAC found on the AR5312
 * chip's AHB bus.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */

/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word.)
 *
 * 2) There is stuff in the original tulip to shut down the device when
 *    reacting to a change in link status.  Is that needed?
 *
 * 3) Test with variety of 10/100 HDX/FDX scenarios.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.9 2007/10/17 19:55:35 garbled Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

/*
 * Transmit-threshold table: OPMODE register bits paired with a
 * human-readable name, in increasing threshold order, terminated
 * by a NULL name.  ae_intr() steps forward through this table on a
 * transmit underrun, ending at store-and-forward mode.
 */
static const struct {
	u_int32_t	txth_opmode;	/* OPMODE bits */
	const char	*txth_name;	/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};

/* Autoconfiguration entry points. */
static int	ae_match(struct device *, struct cfdata *, void *);
static void	ae_attach(struct device *, struct device *, void *);
static int	ae_detach(struct device *, int);
static int	ae_activate(struct device *, enum devact);

/* Chip reset helpers. */
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

/* ifmedia callbacks. */
static int	ae_mediachange(struct ifnet *);
static void	ae_mediastatus(struct ifnet *, struct ifmediareq *);

/* ifnet entry points. */
static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

/* Interrupt handling. */
static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

/* MII glue. */
static void	ae_mii_tick(void *);
static void	ae_mii_statchg(struct device *);

static int	ae_mii_readreg(struct device *, int, int);
static void	ae_mii_writereg(struct device *, int, int, int);

/* Debug printf; only emits output when IFF_DEBUG is set on the interface. */
#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.  Matches purely by comparing the
 *	arbus attach-args name with the config attachment name.
 */
int
ae_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;

}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.  Resources acquired
 *	before the AE_ATTACHED flag is set are released on the
 *	fail_* paths below (in reverse order of acquisition);
 *	after that point the attach cannot fail.
 */
void
ae_attach(struct device *parent, struct device *self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = (void *)self;
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address from the "mac-addr" device property;
	 * without it we cannot attach.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Record bus handles/interrupt lines from the attach args. */
	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one single-segment
	 * cluster-sized map per receive descriptor).
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ae_mediachange,
	    ae_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* If no PHY was found, offer only "none"; otherwise autoselect. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	/* Fill in the ifnet and hook up our entry points. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 *	Activation is not supported; deactivation deactivates
 *	the PHYs and the interface.
 */
int
ae_activate(struct device *self, enum devact act)
{
	struct ae_softc *sc = (void *) self;
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		mii_activate(&sc->sc_mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return (error);
}

/*
 * ae_detach:
 *
 *	Detach a device interface.  Releases everything ae_attach()
 *	acquired, in roughly the reverse order.
 */
int
ae_detach(struct device *self, int flags)
{
	struct ae_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->sc_rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Unload/destroy per-descriptor receive DMA maps and mbufs. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	/* Likewise for the transmit side. */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));


	/* Nothing to do unless running and not already active. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If the packet data is not 4-byte
		 * aligned, or if loading fails (the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources), copy into a fresh mbuf
		 * (cluster if needed) and try again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We took the copy path; free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		/*
		 * NOTE(review): lasttx is initialized to 1 above, so this
		 * KASSERT can never fire; the tulip ancestor initializes
		 * it to -1 -- confirm intended value.
		 */
		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog: [ifnet interface function]
 *
 *	Watchdog timer handler.  Reinitializes the interface via
 *	ae_init() and restarts transmission.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	/* A non-empty dirty queue means a real transmit was pending. */
	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
	}
	else
		printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);

	(void) ae_init(ifp);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/*
 * ae_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		/* If the interface is up and running, only modify the receive
		 * filter when setting promiscuous or debug mode.  Otherwise
		 * fall through to ether_ioctl, which will reset the chip.
		 */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			/* Set up the receive filter. */
			ae_filter_setup(sc);
			error = 0;
			break;
#undef RESETIGN
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed.  Set the
				 * hardware filter accordingly.
				 */
				ae_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	/* Remember the flags for the RESETIGN comparison above. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	/* Service status bits until no enabled interrupt remains pending. */
	for (;;) {
		/* Read the pending bits and acknowledge them by writing
		 * them back to CSR_STATUS. */
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available (step forward
				 * through the ae_txthresh table).
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
			/* Reinitialize to recover. */
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
#endif
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	/*
	 * Walk the receive ring from the last software pointer until we
	 * reach a descriptor the chip still owns.
	 */
	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		    (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		    ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/*
		 * Shift by 2 so the payload after the 14-byte Ethernet
		 * header lands 4-byte aligned for the upper layers.
		 */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf.
		 */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		/* NOTE(review): 'eh' is assigned here but never used. */
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if its for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf("     descriptor %d:\n", i);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of this
		 * job, the frame is not done yet; stop here.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions count as 16; otherwise use the
		 * per-frame collision count reported by the chip.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
/*
 * ae_print_stats:
 *
 *	Dump the transmit error/collision counters.
 */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 *
	 * NOTE(review): delay(10) waits 10us, not 2us -- presumably a
	 * safety margin; confirm against the chip documentation.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll up to ~10ms for the SWR bit to self-clear. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	delay(1000);
}

/*
 * ae_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
	AE_BARRIER(sc);

	/*
	 * Initialize the transmit descriptor ring.  Descriptors are
	 * chained via ad_bufaddr2; the last one is marked to wrap back
	 * to the first.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI|STATUS_RU;
	sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	/* Ack any stale interrupt status before enabling. */
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
	AE_BARRIER(sc);

	/*
	 * Set the station address.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
	    enaddr[1] << 8 | enaddr[0]);
	AE_BARRIER(sc);

	/*
	 * Set the receive filter.  This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	ae_mediachange(ifp);

	/*
	 * Start the mac.
	 */
	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
	AE_BARRIER(sc);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);
	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
	AE_BARRIER(sc);

	if (sc->sc_tick != NULL) {
		/*
		 * Start the one second clock.
		 * NOTE(review): hz >> 3 is 1/8 second, not one second --
		 * confirm which period is intended.
		 */
		callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * ae_enable:
 *
 *	Enable the chip.
 */
static int
ae_enable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc) == 0) {
		sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
		    ae_intr, sc);
		if (sc->sc_ih == NULL) {
			printf("%s: unable to establish interrupt\n",
			    sc->sc_dev.dv_xname);
			return (EIO);
		}
		sc->sc_flags |= AE_ENABLED;
	}
	return (0);
}

/*
 * ae_disable:
 *
 *	Disable the chip.
 */
static void
ae_disable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc)) {
		arbus_intr_disestablish(sc->sc_ih);
		sc->sc_flags &= ~AE_ENABLED;
	}
}

/*
 * ae_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
ae_power(int why, void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	/*
	 * NOTE(review): debugging leftover; the (uint32_t) cast of a
	 * pointer for %x works on this 32-bit platform but %p would be
	 * the portable spelling.
	 */
	printf("power called: %d, %x\n", why, (uint32_t)arg);
	s = splnet();
	switch (why) {
	case PWR_STANDBY:
		/* do nothing! */
		break;
	case PWR_SUSPEND:
		ae_stop(ifp, 0);
		ae_disable(sc);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			ae_enable(sc);
			ae_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}

/*
 * ae_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
ae_rxdrain(struct ae_softc *sc)
{
	struct ae_rxsoft *rxs;
	int i;

	/* Unload and free every mbuf still attached to the rx ring. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * ae_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
	AE_BARRIER(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}

/*
 * ae_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
1666 */ 1667 static int 1668 ae_add_rxbuf(struct ae_softc *sc, int idx) 1669 { 1670 struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1671 struct mbuf *m; 1672 int error; 1673 1674 MGETHDR(m, M_DONTWAIT, MT_DATA); 1675 if (m == NULL) 1676 return (ENOBUFS); 1677 1678 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1679 MCLGET(m, M_DONTWAIT); 1680 if ((m->m_flags & M_EXT) == 0) { 1681 m_freem(m); 1682 return (ENOBUFS); 1683 } 1684 1685 if (rxs->rxs_mbuf != NULL) 1686 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1687 1688 rxs->rxs_mbuf = m; 1689 1690 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1691 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1692 BUS_DMA_READ|BUS_DMA_NOWAIT); 1693 if (error) { 1694 printf("%s: can't load rx DMA map %d, error = %d\n", 1695 sc->sc_dev.dv_xname, idx, error); 1696 panic("ae_add_rxbuf"); /* XXX */ 1697 } 1698 1699 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1700 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1701 1702 AE_INIT_RXDESC(sc, idx); 1703 1704 return (0); 1705 } 1706 1707 /* 1708 * ae_filter_setup: 1709 * 1710 * Set the chip's receive filter. 1711 */ 1712 static void 1713 ae_filter_setup(struct ae_softc *sc) 1714 { 1715 struct ethercom *ec = &sc->sc_ethercom; 1716 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1717 struct ether_multi *enm; 1718 struct ether_multistep step; 1719 uint32_t hash, mchash[2]; 1720 uint32_t macctl = 0; 1721 1722 /* 1723 * If the chip is running, we need to reset the interface, 1724 * and will revisit here (with IFF_RUNNING) clear. The 1725 * chip seems to really not like to have its multicast 1726 * filter programmed without a reset. 
1727 */ 1728 if (ifp->if_flags & IFF_RUNNING) { 1729 (void) ae_init(ifp); 1730 return; 1731 } 1732 1733 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n", 1734 sc->sc_dev.dv_xname, sc->sc_flags)); 1735 1736 macctl = AE_READ(sc, CSR_MACCTL); 1737 macctl &= ~(MACCTL_PR | MACCTL_PM); 1738 macctl |= MACCTL_HASH; 1739 macctl |= MACCTL_HBD; 1740 macctl |= MACCTL_PR; 1741 1742 if (ifp->if_flags & IFF_PROMISC) { 1743 macctl |= MACCTL_PR; 1744 goto allmulti; 1745 } 1746 1747 mchash[0] = mchash[1] = 0; 1748 1749 ETHER_FIRST_MULTI(step, ec, enm); 1750 while (enm != NULL) { 1751 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1752 /* 1753 * We must listen to a range of multicast addresses. 1754 * For now, just accept all multicasts, rather than 1755 * trying to set only those filter bits needed to match 1756 * the range. (At this time, the only use of address 1757 * ranges is for IP multicast routing, for which the 1758 * range is big enough to require all bits set.) 1759 */ 1760 goto allmulti; 1761 } 1762 1763 /* Verify whether we use big or little endian hashes */ 1764 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f; 1765 mchash[hash >> 5] |= 1 << (hash & 0x1f); 1766 ETHER_NEXT_MULTI(step, enm); 1767 } 1768 ifp->if_flags &= ~IFF_ALLMULTI; 1769 goto setit; 1770 1771 allmulti: 1772 ifp->if_flags |= IFF_ALLMULTI; 1773 mchash[0] = mchash[1] = 0xffffffff; 1774 macctl |= MACCTL_PM; 1775 1776 setit: 1777 AE_WRITE(sc, CSR_HTHI, mchash[0]); 1778 AE_WRITE(sc, CSR_HTHI, mchash[1]); 1779 1780 AE_WRITE(sc, CSR_MACCTL, macctl); 1781 AE_BARRIER(sc); 1782 1783 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n", 1784 sc->sc_dev.dv_xname, macctl)); 1785 } 1786 1787 /* 1788 * ae_idle: 1789 * 1790 * Cause the transmit and/or receive processes to go idle. 
 */
void
ae_idle(struct ae_softc *sc, u_int32_t bits)
{
	/* Human-readable names for the STATUS_TS transmit state field. */
	static const char * const txstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - WAIT",
		"RUNNING - READING",
		"-- RESERVED --",
		"RUNNING - SETUP",
		"SUSPENDED",
		"RUNNING - CLOSE",
	};
	/* Human-readable names for the STATUS_RS receive state field. */
	static const char * const rxstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - CHECK",
		"RUNNING - WAIT",
		"SUSPENDED",
		"RUNNING - CLOSE",
		"RUNNING - FLUSH",
		"RUNNING - QUEUE",
	};

	u_int32_t csr, ackmask = 0;
	int i;

	if (bits & OPMODE_ST)
		ackmask |= STATUS_TPS;

	if (bits & OPMODE_SR)
		ackmask |= STATUS_RPS;

	AE_CLR(sc, CSR_OPMODE, bits);

	/* Wait up to ~10ms for the stop(s) to be acknowledged. */
	for (i = 0; i < 1000; i++) {
		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
			break;
		delay(10);
	}

	/*
	 * If the ack bits never came up, report which process failed
	 * to idle -- unless it nonetheless reached the STOPPED state.
	 */
	csr = AE_READ(sc, CSR_STATUS);
	if ((csr & ackmask) != ackmask) {
		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
			printf("%s: transmit process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    txstate_names[(csr & STATUS_TS) >> 20]);
		}
		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
			printf("%s: receive process failed to idle: "
			    "state %s\n", sc->sc_dev.dv_xname,
			    rxstate_names[(csr & STATUS_RS) >> 17]);
		}
	}
}

/*****************************************************************************
 * Generic media support functions.
 *****************************************************************************/

/*
 * ae_mediastatus:	[ifmedia interface function]
 *
 *	Query the current media.
 */
void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ae_softc *sc = ifp->if_softc;

	/* If the device is disabled, report "no media" without polling. */
	if (AE_IS_ENABLED(sc) == 0) {
		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * ae_mediachange:	[ifmedia interface function]
 *
 *	Update the current media.
 */
int
ae_mediachange(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	mii_mediachg(&sc->sc_mii);
	return (0);
}

/*****************************************************************************
 * Support functions for MII-attached media.
 *****************************************************************************/

/*
 * ae_mii_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	if (!device_is_active(&sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}

/*
 * ae_mii_statchg:	[mii interface function]
 *
 *	Callback from PHY when media changes.
 */
static void
ae_mii_statchg(struct device *self)
{
	struct ae_softc *sc = (struct ae_softc *)self;
	uint32_t macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST|OPMODE_SR);

	/*
	 * Full duplex: enable flow control, clear "disable receive own".
	 * Half duplex: the converse.
	 */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}

/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.
 */
static int
ae_mii_readreg(struct device *self, int phy, int reg)
{
	struct ae_softc *sc = (struct ae_softc *)self;
	uint32_t addr;
	int i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	/*
	 * Spin until the MII is no longer busy.
	 * NOTE(review): there is no timeout error handling; if the loop
	 * bound is hit, the (possibly stale) data register is returned
	 * anyway.
	 */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
}

/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.
 */
static void
ae_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct ae_softc *sc = (struct ae_softc *)self;
	uint32_t addr;
	int i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);

	/* Spin until the MII is no longer busy (no timeout handling). */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}
}