1 /* $Id: if_ae.c,v 1.24 2012/10/27 17:18:02 chs Exp $ */ 2 /*- 3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center. 4 * Copyright (c) 2006 Garrett D'Amore. 5 * All rights reserved. 6 * 7 * This code was written by Garrett D'Amore for the Champaign-Urbana 8 * Community Wireless Network Project. 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 3. All advertising materials mentioning features or use of this 20 * software must display the following acknowledgements: 21 * This product includes software developed by the Urbana-Champaign 22 * Independent Media Center. 23 * This product includes software developed by Garrett D'Amore. 24 * 4. Urbana-Champaign Independent Media Center's name and Garrett 25 * D'Amore's name may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT 29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT 33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 */ 42 /*- 43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc. 44 * All rights reserved. 45 * 46 * This code is derived from software contributed to The NetBSD Foundation 47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 48 * NASA Ames Research Center; and by Charles M. Hannum. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 62 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the onboard ethernet MAC found on the AR5312
 * chip's AHB bus.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */

/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word.)
 *
 * 2) There is stuff in original tulip to shut down the device when reacting
 *    to a change in link status.  Is that needed?
 *
 * 3) Test with variety of 10/100 HDX/FDX scenarios.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.24 2012/10/27 17:18:02 chs Exp $");


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

/*
 * Transmit FIFO threshold table, ordered from smallest threshold to
 * store-and-forward.  The interrupt handler walks this table upward
 * on transmit underrun (see ae_intr); the NULL name terminates it.
 */
static const struct {
	u_int32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};

/* Autoconfiguration glue. */
static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

/* ifnet entry points. */
static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int
ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

/* Interrupt handling. */
static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

/* MII/PHY support. */
static void	ae_mii_tick(void *);
static void	ae_mii_statchg(struct ifnet *);

static int	ae_mii_readreg(device_t, int, int);
static void	ae_mii_writereg(device_t, int, int, int);

/*
 * Debug printf, active only when the interface has IFF_DEBUG set
 * (and the driver is built with AE_DEBUG).
 */
#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL_NEW(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.  Matches purely on the arbus
 *	device name supplied in the attach arguments.
 */
int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;

}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system: fetch the MAC address
 *	from device properties, map registers, allocate and load the
 *	DMA control data, create per-packet tx/rx DMA maps, reset the
 *	chip, probe the MII, and attach the ifnet/ethernet layers.
 *	On failure, resources are released via the fail_* unwind
 *	chain at the bottom (reverse order of acquisition).
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.  It is passed in as a "mac-address"
	 * device property; without it we cannot attach.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  BUS_DMA_COHERENT so descriptor updates are
	 * visible to the chip without explicit cache operations.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.  Receive uses a single
	 * segment (one cluster) per descriptor.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* No PHY found: fall back to a "none" media instance. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 */
int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/*
	 * Release per-packet DMA resources: unload and free any mbufs
	 * still attached, then destroy the maps (reverse of attach).
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	/* Release the control data (descriptor) DMA resources. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the interface send queue, loads them into transmit DMA
 *	descriptors, and hands them to the chip.  Packets that are
 *	misaligned or too fragmented for the DMA map are copied into
 *	a fresh mbuf (cluster) first.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;	/* XXX last_txs is assigned but never read */
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));


	/* Nothing to do unless running and not already stalled. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so we
		 * can put it back untouched if we run out of resources.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  A packet whose data is not 4-byte aligned is
		 * copied unconditionally.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		/* Commit: really remove the packet from the send queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We transmitted the copy; free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_xname(sc->sc_dev), lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.  Fires when a transmit set up by
 *	ae_start has not completed within the timeout; reinitializes
 *	the chip and restarts transmission.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", device_xname(sc->sc_dev));
		ifp->if_oerrors++;
	}
	else
		printf("%s: spurious watchdog timeout\n", device_xname(sc->sc_dev));

	(void) ae_init(ifp);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/* If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.
Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	/* Which flags changed since the last ioctl? */
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet().
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	/* Remember the flags for ae_ifflags_cb()'s change detection. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.  Loops reading and acknowledging
 *	the status register until no enabled interrupt bits remain,
 *	dispatching to the rx/tx helpers and handling error
 *	conditions.  Returns nonzero if the interrupt was ours.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", device_xname(sc->sc_dev)));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", device_xname(sc->sc_dev));
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			/* Writing the bits back acknowledges them. */
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    device_xname(sc->sc_dev));
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    device_xname(sc->sc_dev));

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available (walks the
				 * ae_txthresh table toward
				 * store-and-forward).
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    device_xname(sc->sc_dev),
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    device_xname(sc->sc_dev));
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    device_xname(sc->sc_dev));
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    device_xname(sc->sc_dev), str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 *	Transmit buffer unavailable -- normal
		 *	condition, nothing to do, really.
		 *
		 *	General purpose timer expired -- we don't
		 *	use the general purpose timer.
		 *
		 *	Early receive interrupt -- not available on
		 *	all chips, we just use RI.  We also only
		 *	use single-segment receive DMA, so this
		 *	is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the receive ring
 *	from sc_rxptr until it finds a descriptor the chip still owns,
 *	passing good packets up the stack and recycling the buffers
 *	of errored ones.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		     (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		      ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(sc->sc_dev), str)
			ifp->if_ierrors++;
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift forward 2 bytes so the IP header lands word-aligned. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);	/* XXX eh is unused */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	/* We may be able to queue new frames again below. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/* Pull the descriptor status back from DMA memory. */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		/* Optionally dump the whole descriptor chain for this job. */
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of the job,
		 * the frame is not done yet; stop here.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Return this job's descriptors to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		/* Underflow / timeout are hard output errors. */
		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions means 16 were seen; otherwise the
		 * chip reports the exact count in the status word.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
/* Dump the accumulated transmit error statistics to the console. */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    device_xname(sc->sc_dev),
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically;
	 * we must clear SWR ourselves after letting it settle.
	 * NOTE(review): the original comment said "after 2us" but the
	 * code waits 10us (delay(10)) — confirm the required settle
	 * time against the MAC documentation.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll up to ~10ms for SWR to self-clear. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", device_xname(sc->sc_dev));

	delay(1000);
}

/*
 * ae_init: [ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
1304 */ 1305 static int 1306 ae_init(struct ifnet *ifp) 1307 { 1308 struct ae_softc *sc = ifp->if_softc; 1309 struct ae_txsoft *txs; 1310 struct ae_rxsoft *rxs; 1311 const uint8_t *enaddr; 1312 int i, error = 0; 1313 1314 if ((error = ae_enable(sc)) != 0) 1315 goto out; 1316 1317 /* 1318 * Cancel any pending I/O. 1319 */ 1320 ae_stop(ifp, 0); 1321 1322 /* 1323 * Reset the chip to a known state. 1324 */ 1325 ae_reset(sc); 1326 1327 /* 1328 * Initialize the BUSMODE register. 1329 */ 1330 AE_WRITE(sc, CSR_BUSMODE, 1331 /* XXX: not sure if this is a good thing or not... */ 1332 //BUSMODE_ALIGN_16B | 1333 BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW); 1334 AE_BARRIER(sc); 1335 1336 /* 1337 * Initialize the transmit descriptor ring. 1338 */ 1339 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1340 for (i = 0; i < AE_NTXDESC; i++) { 1341 sc->sc_txdescs[i].ad_ctl = 0; 1342 sc->sc_txdescs[i].ad_bufaddr2 = 1343 AE_CDTXADDR(sc, AE_NEXTTX(i)); 1344 } 1345 sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER; 1346 AE_CDTXSYNC(sc, 0, AE_NTXDESC, 1347 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1348 sc->sc_txfree = AE_NTXDESC; 1349 sc->sc_txnext = 0; 1350 1351 /* 1352 * Initialize the transmit job descriptors. 1353 */ 1354 SIMPLEQ_INIT(&sc->sc_txfreeq); 1355 SIMPLEQ_INIT(&sc->sc_txdirtyq); 1356 for (i = 0; i < AE_TXQUEUELEN; i++) { 1357 txs = &sc->sc_txsoft[i]; 1358 txs->txs_mbuf = NULL; 1359 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1360 } 1361 1362 /* 1363 * Initialize the receive descriptor and receive job 1364 * descriptor rings. 1365 */ 1366 for (i = 0; i < AE_NRXDESC; i++) { 1367 rxs = &sc->sc_rxsoft[i]; 1368 if (rxs->rxs_mbuf == NULL) { 1369 if ((error = ae_add_rxbuf(sc, i)) != 0) { 1370 printf("%s: unable to allocate or map rx " 1371 "buffer %d, error = %d\n", 1372 device_xname(sc->sc_dev), i, error); 1373 /* 1374 * XXX Should attempt to run with fewer receive 1375 * XXX buffers instead of just failing. 
1376 */ 1377 ae_rxdrain(sc); 1378 goto out; 1379 } 1380 } else 1381 AE_INIT_RXDESC(sc, i); 1382 } 1383 sc->sc_rxptr = 0; 1384 1385 /* 1386 * Initialize the interrupt mask and enable interrupts. 1387 */ 1388 /* normal interrupts */ 1389 sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS; 1390 1391 /* abnormal interrupts */ 1392 sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF | 1393 STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS; 1394 1395 sc->sc_rxint_mask = STATUS_RI|STATUS_RU; 1396 sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT; 1397 1398 sc->sc_rxint_mask &= sc->sc_inten; 1399 sc->sc_txint_mask &= sc->sc_inten; 1400 1401 AE_WRITE(sc, CSR_INTEN, sc->sc_inten); 1402 AE_WRITE(sc, CSR_STATUS, 0xffffffff); 1403 1404 /* 1405 * Give the transmit and receive rings to the chip. 1406 */ 1407 AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext)); 1408 AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr)); 1409 AE_BARRIER(sc); 1410 1411 /* 1412 * Set the station address. 1413 */ 1414 enaddr = CLLADDR(ifp->if_sadl); 1415 AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]); 1416 AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 | 1417 enaddr[1] << 8 | enaddr[0]); 1418 AE_BARRIER(sc); 1419 1420 /* 1421 * Set the receive filter. This will start the transmit and 1422 * receive processes. 1423 */ 1424 ae_filter_setup(sc); 1425 1426 /* 1427 * Set the current media. 1428 */ 1429 if ((error = ether_mediachange(ifp)) != 0) 1430 goto out; 1431 1432 /* 1433 * Start the mac. 1434 */ 1435 AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE); 1436 AE_BARRIER(sc); 1437 1438 /* 1439 * Write out the opmode. 1440 */ 1441 AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST | 1442 ae_txthresh[sc->sc_txthresh].txth_opmode); 1443 /* 1444 * Start the receive process. 1445 */ 1446 AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD); 1447 AE_BARRIER(sc); 1448 1449 if (sc->sc_tick != NULL) { 1450 /* Start the one second clock. 
*/ 1451 callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc); 1452 } 1453 1454 /* 1455 * Note that the interface is now running. 1456 */ 1457 ifp->if_flags |= IFF_RUNNING; 1458 ifp->if_flags &= ~IFF_OACTIVE; 1459 sc->sc_if_flags = ifp->if_flags; 1460 1461 out: 1462 if (error) { 1463 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1464 ifp->if_timer = 0; 1465 printf("%s: interface not running\n", device_xname(sc->sc_dev)); 1466 } 1467 return (error); 1468 } 1469 1470 /* 1471 * ae_enable: 1472 * 1473 * Enable the chip. 1474 */ 1475 static int 1476 ae_enable(struct ae_softc *sc) 1477 { 1478 1479 if (AE_IS_ENABLED(sc) == 0) { 1480 sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq, 1481 ae_intr, sc); 1482 if (sc->sc_ih == NULL) { 1483 printf("%s: unable to establish interrupt\n", 1484 device_xname(sc->sc_dev)); 1485 return (EIO); 1486 } 1487 sc->sc_flags |= AE_ENABLED; 1488 } 1489 return (0); 1490 } 1491 1492 /* 1493 * ae_disable: 1494 * 1495 * Disable the chip. 1496 */ 1497 static void 1498 ae_disable(struct ae_softc *sc) 1499 { 1500 1501 if (AE_IS_ENABLED(sc)) { 1502 arbus_intr_disestablish(sc->sc_ih); 1503 sc->sc_flags &= ~AE_ENABLED; 1504 } 1505 } 1506 1507 /* 1508 * ae_power: 1509 * 1510 * Power management (suspend/resume) hook. 1511 */ 1512 static void 1513 ae_power(int why, void *arg) 1514 { 1515 struct ae_softc *sc = arg; 1516 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1517 int s; 1518 1519 printf("power called: %d, %x\n", why, (uint32_t)arg); 1520 s = splnet(); 1521 switch (why) { 1522 case PWR_STANDBY: 1523 /* do nothing! */ 1524 break; 1525 case PWR_SUSPEND: 1526 ae_stop(ifp, 0); 1527 ae_disable(sc); 1528 break; 1529 case PWR_RESUME: 1530 if (ifp->if_flags & IFF_UP) { 1531 ae_enable(sc); 1532 ae_init(ifp); 1533 } 1534 break; 1535 case PWR_SOFTSUSPEND: 1536 case PWR_SOFTSTANDBY: 1537 case PWR_SOFTRESUME: 1538 break; 1539 } 1540 splx(s); 1541 } 1542 1543 /* 1544 * ae_rxdrain: 1545 * 1546 * Drain the receive queue. 
1547 */ 1548 static void 1549 ae_rxdrain(struct ae_softc *sc) 1550 { 1551 struct ae_rxsoft *rxs; 1552 int i; 1553 1554 for (i = 0; i < AE_NRXDESC; i++) { 1555 rxs = &sc->sc_rxsoft[i]; 1556 if (rxs->rxs_mbuf != NULL) { 1557 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1558 m_freem(rxs->rxs_mbuf); 1559 rxs->rxs_mbuf = NULL; 1560 } 1561 } 1562 } 1563 1564 /* 1565 * ae_stop: [ ifnet interface function ] 1566 * 1567 * Stop transmission on the interface. 1568 */ 1569 static void 1570 ae_stop(struct ifnet *ifp, int disable) 1571 { 1572 struct ae_softc *sc = ifp->if_softc; 1573 struct ae_txsoft *txs; 1574 1575 if (sc->sc_tick != NULL) { 1576 /* Stop the one second clock. */ 1577 callout_stop(&sc->sc_tick_callout); 1578 } 1579 1580 /* Down the MII. */ 1581 mii_down(&sc->sc_mii); 1582 1583 /* Disable interrupts. */ 1584 AE_WRITE(sc, CSR_INTEN, 0); 1585 1586 /* Stop the transmit and receive processes. */ 1587 AE_WRITE(sc, CSR_OPMODE, 0); 1588 AE_WRITE(sc, CSR_RXLIST, 0); 1589 AE_WRITE(sc, CSR_TXLIST, 0); 1590 AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE); 1591 AE_BARRIER(sc); 1592 1593 /* 1594 * Release any queued transmit buffers. 1595 */ 1596 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1597 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1598 if (txs->txs_mbuf != NULL) { 1599 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1600 m_freem(txs->txs_mbuf); 1601 txs->txs_mbuf = NULL; 1602 } 1603 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1604 } 1605 1606 /* 1607 * Mark the interface down and cancel the watchdog timer. 1608 */ 1609 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1610 sc->sc_if_flags = ifp->if_flags; 1611 ifp->if_timer = 0; 1612 1613 if (disable) { 1614 ae_rxdrain(sc); 1615 ae_disable(sc); 1616 } 1617 1618 /* 1619 * Reset the chip (needed on some flavors to actually disable it). 1620 */ 1621 ae_reset(sc); 1622 } 1623 1624 /* 1625 * ae_add_rxbuf: 1626 * 1627 * Add a receive buffer to the indicated descriptor. 
1628 */ 1629 static int 1630 ae_add_rxbuf(struct ae_softc *sc, int idx) 1631 { 1632 struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1633 struct mbuf *m; 1634 int error; 1635 1636 MGETHDR(m, M_DONTWAIT, MT_DATA); 1637 if (m == NULL) 1638 return (ENOBUFS); 1639 1640 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1641 MCLGET(m, M_DONTWAIT); 1642 if ((m->m_flags & M_EXT) == 0) { 1643 m_freem(m); 1644 return (ENOBUFS); 1645 } 1646 1647 if (rxs->rxs_mbuf != NULL) 1648 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1649 1650 rxs->rxs_mbuf = m; 1651 1652 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1653 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1654 BUS_DMA_READ|BUS_DMA_NOWAIT); 1655 if (error) { 1656 printf("%s: can't load rx DMA map %d, error = %d\n", 1657 device_xname(sc->sc_dev), idx, error); 1658 panic("ae_add_rxbuf"); /* XXX */ 1659 } 1660 1661 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1662 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1663 1664 AE_INIT_RXDESC(sc, idx); 1665 1666 return (0); 1667 } 1668 1669 /* 1670 * ae_filter_setup: 1671 * 1672 * Set the chip's receive filter. 1673 */ 1674 static void 1675 ae_filter_setup(struct ae_softc *sc) 1676 { 1677 struct ethercom *ec = &sc->sc_ethercom; 1678 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1679 struct ether_multi *enm; 1680 struct ether_multistep step; 1681 uint32_t hash, mchash[2]; 1682 uint32_t macctl = 0; 1683 1684 /* 1685 * If the chip is running, we need to reset the interface, 1686 * and will revisit here (with IFF_RUNNING) clear. The 1687 * chip seems to really not like to have its multicast 1688 * filter programmed without a reset. 
1689 */ 1690 if (ifp->if_flags & IFF_RUNNING) { 1691 (void) ae_init(ifp); 1692 return; 1693 } 1694 1695 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n", 1696 device_xname(sc->sc_dev), sc->sc_flags)); 1697 1698 macctl = AE_READ(sc, CSR_MACCTL); 1699 macctl &= ~(MACCTL_PR | MACCTL_PM); 1700 macctl |= MACCTL_HASH; 1701 macctl |= MACCTL_HBD; 1702 macctl |= MACCTL_PR; 1703 1704 if (ifp->if_flags & IFF_PROMISC) { 1705 macctl |= MACCTL_PR; 1706 goto allmulti; 1707 } 1708 1709 mchash[0] = mchash[1] = 0; 1710 1711 ETHER_FIRST_MULTI(step, ec, enm); 1712 while (enm != NULL) { 1713 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1714 /* 1715 * We must listen to a range of multicast addresses. 1716 * For now, just accept all multicasts, rather than 1717 * trying to set only those filter bits needed to match 1718 * the range. (At this time, the only use of address 1719 * ranges is for IP multicast routing, for which the 1720 * range is big enough to require all bits set.) 1721 */ 1722 goto allmulti; 1723 } 1724 1725 /* Verify whether we use big or little endian hashes */ 1726 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f; 1727 mchash[hash >> 5] |= 1 << (hash & 0x1f); 1728 ETHER_NEXT_MULTI(step, enm); 1729 } 1730 ifp->if_flags &= ~IFF_ALLMULTI; 1731 goto setit; 1732 1733 allmulti: 1734 ifp->if_flags |= IFF_ALLMULTI; 1735 mchash[0] = mchash[1] = 0xffffffff; 1736 macctl |= MACCTL_PM; 1737 1738 setit: 1739 AE_WRITE(sc, CSR_HTHI, mchash[0]); 1740 AE_WRITE(sc, CSR_HTHI, mchash[1]); 1741 1742 AE_WRITE(sc, CSR_MACCTL, macctl); 1743 AE_BARRIER(sc); 1744 1745 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n", 1746 device_xname(sc->sc_dev), macctl)); 1747 } 1748 1749 /* 1750 * ae_idle: 1751 * 1752 * Cause the transmit and/or receive processes to go idle. 
1753 */ 1754 void 1755 ae_idle(struct ae_softc *sc, u_int32_t bits) 1756 { 1757 static const char * const txstate_names[] = { 1758 "STOPPED", 1759 "RUNNING - FETCH", 1760 "RUNNING - WAIT", 1761 "RUNNING - READING", 1762 "-- RESERVED --", 1763 "RUNNING - SETUP", 1764 "SUSPENDED", 1765 "RUNNING - CLOSE", 1766 }; 1767 static const char * const rxstate_names[] = { 1768 "STOPPED", 1769 "RUNNING - FETCH", 1770 "RUNNING - CHECK", 1771 "RUNNING - WAIT", 1772 "SUSPENDED", 1773 "RUNNING - CLOSE", 1774 "RUNNING - FLUSH", 1775 "RUNNING - QUEUE", 1776 }; 1777 1778 u_int32_t csr, ackmask = 0; 1779 int i; 1780 1781 if (bits & OPMODE_ST) 1782 ackmask |= STATUS_TPS; 1783 1784 if (bits & OPMODE_SR) 1785 ackmask |= STATUS_RPS; 1786 1787 AE_CLR(sc, CSR_OPMODE, bits); 1788 1789 for (i = 0; i < 1000; i++) { 1790 if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask) 1791 break; 1792 delay(10); 1793 } 1794 1795 csr = AE_READ(sc, CSR_STATUS); 1796 if ((csr & ackmask) != ackmask) { 1797 if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 && 1798 (csr & STATUS_TS) != STATUS_TS_STOPPED) { 1799 printf("%s: transmit process failed to idle: " 1800 "state %s\n", device_xname(sc->sc_dev), 1801 txstate_names[(csr & STATUS_TS) >> 20]); 1802 } 1803 if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 && 1804 (csr & STATUS_RS) != STATUS_RS_STOPPED) { 1805 printf("%s: receive process failed to idle: " 1806 "state %s\n", device_xname(sc->sc_dev), 1807 rxstate_names[(csr & STATUS_RS) >> 17]); 1808 } 1809 } 1810 } 1811 1812 /***************************************************************************** 1813 * Support functions for MII-attached media. 1814 *****************************************************************************/ 1815 1816 /* 1817 * ae_mii_tick: 1818 * 1819 * One second timer, used to tick the MII. 
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	/* Don't touch the hardware if the device has been detached. */
	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}

/*
 * ae_mii_statchg: [mii interface function]
 *
 *	Callback from PHY when media changes.  Propagates the duplex
 *	setting into MACCTL and enables flow control only in full duplex.
 */
static void
ae_mii_statchg(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	uint32_t macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST|OPMODE_SR);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}

/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.  Writes the PHY/register address to the
 *	MII address register, spins until the BUSY bit clears, then
 *	returns the 16-bit data register.
 *
 *	NOTE(review): the spin loop is bounded only by iteration count
 *	(100000000) with no delay and no timeout error path; if the
 *	bus wedges this returns whatever is in CSR_MIIDATA.  Confirm
 *	whether a timeout/delay is warranted.
 */
static int
ae_mii_readreg(device_t self, int phy, int reg)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (AE_READ(sc, CSR_MIIDATA) & 0xffff);
}

/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.  The data register must be written first;
 *	writing the address register (with MIIADDR_WRITE) latches the
 *	data and starts the MII transaction.
 */
static void
ae_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t addr;
	int i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);

	/*
	 * Spin until the transaction completes.
	 * NOTE(review): same unbounded-feel busy wait as ae_mii_readreg;
	 * no delay and no timeout handling.
	 */
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}
}