1 /* $Id: if_ae.c,v 1.21 2011/07/10 06:24:19 matt Exp $ */ 2 /*- 3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center. 4 * Copyright (c) 2006 Garrett D'Amore. 5 * All rights reserved. 6 * 7 * This code was written by Garrett D'Amore for the Champaign-Urbana 8 * Community Wireless Network Project. 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 3. All advertising materials mentioning features or use of this 20 * software must display the following acknowledgements: 21 * This product includes software developed by the Urbana-Champaign 22 * Independent Media Center. 23 * This product includes software developed by Garrett D'Amore. 24 * 4. Urbana-Champaign Independent Media Center's name and Garrett 25 * D'Amore's name may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT 29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT 33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 */ 42 /*- 43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc. 44 * All rights reserved. 45 * 46 * This code is derived from software contributed to The NetBSD Foundation 47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 48 * NASA Ames Research Center; and by Charles M. Hannum. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 62 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 69 * POSSIBILITY OF SUCH DAMAGE. 70 */ 71 72 /* 73 * Device driver for the onboard ethernet MAC found on the AR5312 74 * chip's AHB bus. 75 * 76 * This device is very simliar to the tulip in most regards, and 77 * the code is directly derived from NetBSD's tulip.c. However, it 78 * is different enough that it did not seem to be a good idea to 79 * add further complexity to the tulip driver, so we have our own. 80 * 81 * Also tulip has a lot of complexity in it for various parts/options 82 * that we don't need, and on these little boxes with only ~8MB RAM, we 83 * don't want any extra bloat. 84 */ 85 86 /* 87 * TODO: 88 * 89 * 1) Find out about BUS_MODE_ALIGN16B. This chip can apparently align 90 * inbound packets on a half-word boundary, which would make life easier 91 * for TCP/IP. (Aligning IP headers on a word.) 92 * 93 * 2) There is stuff in original tulip to shut down the device when reacting 94 * to a a change in link status. Is that needed. 95 * 96 * 3) Test with variety of 10/100 HDX/FDX scenarios. 
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.21 2011/07/10 06:24:19 matt Exp $");


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

/*
 * Transmit FIFO threshold table, ordered smallest threshold first and
 * terminated by a NULL name.  On a transmit underrun, ae_intr() steps
 * sc_txthresh to the next entry and reprograms OPMODE; the final entry
 * switches the chip to store-and-forward.
 */
static const struct {
	u_int32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};

/* Autoconfiguration entry points. */
static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

/* ifnet / chip-management helpers. */
static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

/* Interrupt handling. */
static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

/* MII bus glue. */
static void	ae_mii_tick(void *);
static void	ae_mii_statchg(device_t);

static int	ae_mii_readreg(device_t, int, int);
static void	ae_mii_writereg(device_t, int, int, int);

#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.  Matches purely on the arbus
 *	attach-args name against the config name.
 */
int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;

}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system: map registers, allocate
 *	and load the DMA control structures, reset the chip, probe the
 *	MII, and attach the ifnet.  On any failure before AE_ATTACHED
 *	is set, all resources allocated so far are released via the
 *	fail_* unwind labels.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.  It is supplied by the platform code
	 * as a "mac-address" device property.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  BUS_DMA_COHERENT avoids the need for explicit
	 * cache syncs of the descriptor memory itself on this platform.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps (single segment each; the
	 * driver uses one cluster per receive descriptor).
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* No PHY found: fall back to a fixed "none" medium. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.  Only
 *	deactivation is supported.
 */
int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->sc_rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Unload and destroy per-descriptor receive DMA resources. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	/* Likewise for transmit jobs. */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	/* Release the control-data (descriptor ring) DMA memory. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the ifnet send queue, maps them for DMA (copying into a
 *	fresh 4-byte-aligned mbuf when necessary), fills transmit
 *	descriptors, and finally hands the chain to the chip by setting
 *	the OWN bit on the first descriptor last.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;
	bus_dmamap_t dmamap;
	/*
	 * NOTE(review): lasttx is initialized to 1, so the
	 * KASSERT(lasttx != -1) below can never fire; the initializer
	 * was probably meant to be -1 -- confirm against upstream.
	 */
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));


	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue (peek only; dequeued once
		 * we know we can transmit it).
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  A leading-address check forces the copy when the
		 * data is not 4-byte aligned.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We transmitted the copy; free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/* NOTE(review): last_txs is assigned but not read here. */
		last_txs = txs;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.  Resets and restarts the interface;
 *	only counts an output error if a transmit was actually pending.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
	}
	else
		printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);

	(void) ae_init(ifp);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/*
 * If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.
 * Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	/* XOR of current vs. cached flags: the bits that changed. */
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet();
 *	multicast list changes while running are handled by reprogramming
 *	the hardware filter rather than a full reset.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	/* Cache the flags so ae_ifflags_cb() can detect changes. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.  Loops reading the status register,
 *	acknowledging and dispatching receive/transmit events until no
 *	enabled interrupt bits remain set.  Returns nonzero if the
 *	interrupt was ours.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			/*
			 * Acknowledge pending events by writing the bits
			 * back (write-one-to-clear, Tulip-style --
			 * NOTE(review): confirm against aereg.h).
			 */
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 * Transmit buffer unavailable -- normal
		 * condition, nothing to do, really.
		 *
		 * General purpose timer expired -- we don't
		 * use the general purpose timer.
		 *
		 * Early receive interrupt -- not available on
		 * all chips, we just use RI.  We also only
		 * use single-segment receive DMA, so this
		 * is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
#endif
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the receive ring from
 *	sc_rxptr, delivering completed packets until a descriptor still
 *	owned by the chip is found.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		    (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		    ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the IP header lands word-aligned. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if its for us.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	/*
	 * We are about to retire completed jobs, so descriptors may
	 * become free again; let the start routine queue more work.
	 */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * Sync the descriptors for this job so the status word
		 * read below is up to date.
		 */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * If the chip still owns the last descriptor of this
		 * job, the frame is not done yet; stop here.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Return this job's descriptors to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		/* Tear down the DMA map and release the mbuf chain. */
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions (Tx_EC) means 16 failed attempts;
		 * otherwise credit the count from the status word.  A late
		 * collision (Tx_LC) adds one more.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		/*
		 * NOTE(review): the frame is counted as an output packet
		 * even when an error bit was set above -- confirm that
		 * this is intended.
		 */
		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
/*
 * ae_print_stats:
 *
 *	Dump the driver-private transmit error counters to the console.
 */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after at least 2us; we give it 10us here.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll (up to ~10ms) for the SWR bit to clear. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	delay(1000);
}

/*
 * ae_init:	[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
1308 */ 1309 static int 1310 ae_init(struct ifnet *ifp) 1311 { 1312 struct ae_softc *sc = ifp->if_softc; 1313 struct ae_txsoft *txs; 1314 struct ae_rxsoft *rxs; 1315 const uint8_t *enaddr; 1316 int i, error = 0; 1317 1318 if ((error = ae_enable(sc)) != 0) 1319 goto out; 1320 1321 /* 1322 * Cancel any pending I/O. 1323 */ 1324 ae_stop(ifp, 0); 1325 1326 /* 1327 * Reset the chip to a known state. 1328 */ 1329 ae_reset(sc); 1330 1331 /* 1332 * Initialize the BUSMODE register. 1333 */ 1334 AE_WRITE(sc, CSR_BUSMODE, 1335 /* XXX: not sure if this is a good thing or not... */ 1336 //BUSMODE_ALIGN_16B | 1337 BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW); 1338 AE_BARRIER(sc); 1339 1340 /* 1341 * Initialize the transmit descriptor ring. 1342 */ 1343 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1344 for (i = 0; i < AE_NTXDESC; i++) { 1345 sc->sc_txdescs[i].ad_ctl = 0; 1346 sc->sc_txdescs[i].ad_bufaddr2 = 1347 AE_CDTXADDR(sc, AE_NEXTTX(i)); 1348 } 1349 sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER; 1350 AE_CDTXSYNC(sc, 0, AE_NTXDESC, 1351 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1352 sc->sc_txfree = AE_NTXDESC; 1353 sc->sc_txnext = 0; 1354 1355 /* 1356 * Initialize the transmit job descriptors. 1357 */ 1358 SIMPLEQ_INIT(&sc->sc_txfreeq); 1359 SIMPLEQ_INIT(&sc->sc_txdirtyq); 1360 for (i = 0; i < AE_TXQUEUELEN; i++) { 1361 txs = &sc->sc_txsoft[i]; 1362 txs->txs_mbuf = NULL; 1363 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1364 } 1365 1366 /* 1367 * Initialize the receive descriptor and receive job 1368 * descriptor rings. 1369 */ 1370 for (i = 0; i < AE_NRXDESC; i++) { 1371 rxs = &sc->sc_rxsoft[i]; 1372 if (rxs->rxs_mbuf == NULL) { 1373 if ((error = ae_add_rxbuf(sc, i)) != 0) { 1374 printf("%s: unable to allocate or map rx " 1375 "buffer %d, error = %d\n", 1376 sc->sc_dev.dv_xname, i, error); 1377 /* 1378 * XXX Should attempt to run with fewer receive 1379 * XXX buffers instead of just failing. 
1380 */ 1381 ae_rxdrain(sc); 1382 goto out; 1383 } 1384 } else 1385 AE_INIT_RXDESC(sc, i); 1386 } 1387 sc->sc_rxptr = 0; 1388 1389 /* 1390 * Initialize the interrupt mask and enable interrupts. 1391 */ 1392 /* normal interrupts */ 1393 sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS; 1394 1395 /* abnormal interrupts */ 1396 sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF | 1397 STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS; 1398 1399 sc->sc_rxint_mask = STATUS_RI|STATUS_RU; 1400 sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT; 1401 1402 sc->sc_rxint_mask &= sc->sc_inten; 1403 sc->sc_txint_mask &= sc->sc_inten; 1404 1405 AE_WRITE(sc, CSR_INTEN, sc->sc_inten); 1406 AE_WRITE(sc, CSR_STATUS, 0xffffffff); 1407 1408 /* 1409 * Give the transmit and receive rings to the chip. 1410 */ 1411 AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext)); 1412 AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr)); 1413 AE_BARRIER(sc); 1414 1415 /* 1416 * Set the station address. 1417 */ 1418 enaddr = CLLADDR(ifp->if_sadl); 1419 AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]); 1420 AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 | 1421 enaddr[1] << 8 | enaddr[0]); 1422 AE_BARRIER(sc); 1423 1424 /* 1425 * Set the receive filter. This will start the transmit and 1426 * receive processes. 1427 */ 1428 ae_filter_setup(sc); 1429 1430 /* 1431 * Set the current media. 1432 */ 1433 if ((error = ether_mediachange(ifp)) != 0) 1434 goto out; 1435 1436 /* 1437 * Start the mac. 1438 */ 1439 AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE); 1440 AE_BARRIER(sc); 1441 1442 /* 1443 * Write out the opmode. 1444 */ 1445 AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST | 1446 ae_txthresh[sc->sc_txthresh].txth_opmode); 1447 /* 1448 * Start the receive process. 1449 */ 1450 AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD); 1451 AE_BARRIER(sc); 1452 1453 if (sc->sc_tick != NULL) { 1454 /* Start the one second clock. 
*/ 1455 callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc); 1456 } 1457 1458 /* 1459 * Note that the interface is now running. 1460 */ 1461 ifp->if_flags |= IFF_RUNNING; 1462 ifp->if_flags &= ~IFF_OACTIVE; 1463 sc->sc_if_flags = ifp->if_flags; 1464 1465 out: 1466 if (error) { 1467 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1468 ifp->if_timer = 0; 1469 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 1470 } 1471 return (error); 1472 } 1473 1474 /* 1475 * ae_enable: 1476 * 1477 * Enable the chip. 1478 */ 1479 static int 1480 ae_enable(struct ae_softc *sc) 1481 { 1482 1483 if (AE_IS_ENABLED(sc) == 0) { 1484 sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq, 1485 ae_intr, sc); 1486 if (sc->sc_ih == NULL) { 1487 printf("%s: unable to establish interrupt\n", 1488 sc->sc_dev.dv_xname); 1489 return (EIO); 1490 } 1491 sc->sc_flags |= AE_ENABLED; 1492 } 1493 return (0); 1494 } 1495 1496 /* 1497 * ae_disable: 1498 * 1499 * Disable the chip. 1500 */ 1501 static void 1502 ae_disable(struct ae_softc *sc) 1503 { 1504 1505 if (AE_IS_ENABLED(sc)) { 1506 arbus_intr_disestablish(sc->sc_ih); 1507 sc->sc_flags &= ~AE_ENABLED; 1508 } 1509 } 1510 1511 /* 1512 * ae_power: 1513 * 1514 * Power management (suspend/resume) hook. 1515 */ 1516 static void 1517 ae_power(int why, void *arg) 1518 { 1519 struct ae_softc *sc = arg; 1520 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1521 int s; 1522 1523 printf("power called: %d, %x\n", why, (uint32_t)arg); 1524 s = splnet(); 1525 switch (why) { 1526 case PWR_STANDBY: 1527 /* do nothing! */ 1528 break; 1529 case PWR_SUSPEND: 1530 ae_stop(ifp, 0); 1531 ae_disable(sc); 1532 break; 1533 case PWR_RESUME: 1534 if (ifp->if_flags & IFF_UP) { 1535 ae_enable(sc); 1536 ae_init(ifp); 1537 } 1538 break; 1539 case PWR_SOFTSUSPEND: 1540 case PWR_SOFTSTANDBY: 1541 case PWR_SOFTRESUME: 1542 break; 1543 } 1544 splx(s); 1545 } 1546 1547 /* 1548 * ae_rxdrain: 1549 * 1550 * Drain the receive queue. 
1551 */ 1552 static void 1553 ae_rxdrain(struct ae_softc *sc) 1554 { 1555 struct ae_rxsoft *rxs; 1556 int i; 1557 1558 for (i = 0; i < AE_NRXDESC; i++) { 1559 rxs = &sc->sc_rxsoft[i]; 1560 if (rxs->rxs_mbuf != NULL) { 1561 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1562 m_freem(rxs->rxs_mbuf); 1563 rxs->rxs_mbuf = NULL; 1564 } 1565 } 1566 } 1567 1568 /* 1569 * ae_stop: [ ifnet interface function ] 1570 * 1571 * Stop transmission on the interface. 1572 */ 1573 static void 1574 ae_stop(struct ifnet *ifp, int disable) 1575 { 1576 struct ae_softc *sc = ifp->if_softc; 1577 struct ae_txsoft *txs; 1578 1579 if (sc->sc_tick != NULL) { 1580 /* Stop the one second clock. */ 1581 callout_stop(&sc->sc_tick_callout); 1582 } 1583 1584 /* Down the MII. */ 1585 mii_down(&sc->sc_mii); 1586 1587 /* Disable interrupts. */ 1588 AE_WRITE(sc, CSR_INTEN, 0); 1589 1590 /* Stop the transmit and receive processes. */ 1591 AE_WRITE(sc, CSR_OPMODE, 0); 1592 AE_WRITE(sc, CSR_RXLIST, 0); 1593 AE_WRITE(sc, CSR_TXLIST, 0); 1594 AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE); 1595 AE_BARRIER(sc); 1596 1597 /* 1598 * Release any queued transmit buffers. 1599 */ 1600 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1601 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1602 if (txs->txs_mbuf != NULL) { 1603 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1604 m_freem(txs->txs_mbuf); 1605 txs->txs_mbuf = NULL; 1606 } 1607 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1608 } 1609 1610 /* 1611 * Mark the interface down and cancel the watchdog timer. 1612 */ 1613 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1614 sc->sc_if_flags = ifp->if_flags; 1615 ifp->if_timer = 0; 1616 1617 if (disable) { 1618 ae_rxdrain(sc); 1619 ae_disable(sc); 1620 } 1621 1622 /* 1623 * Reset the chip (needed on some flavors to actually disable it). 1624 */ 1625 ae_reset(sc); 1626 } 1627 1628 /* 1629 * ae_add_rxbuf: 1630 * 1631 * Add a receive buffer to the indicated descriptor. 
1632 */ 1633 static int 1634 ae_add_rxbuf(struct ae_softc *sc, int idx) 1635 { 1636 struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1637 struct mbuf *m; 1638 int error; 1639 1640 MGETHDR(m, M_DONTWAIT, MT_DATA); 1641 if (m == NULL) 1642 return (ENOBUFS); 1643 1644 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1645 MCLGET(m, M_DONTWAIT); 1646 if ((m->m_flags & M_EXT) == 0) { 1647 m_freem(m); 1648 return (ENOBUFS); 1649 } 1650 1651 if (rxs->rxs_mbuf != NULL) 1652 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1653 1654 rxs->rxs_mbuf = m; 1655 1656 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1657 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1658 BUS_DMA_READ|BUS_DMA_NOWAIT); 1659 if (error) { 1660 printf("%s: can't load rx DMA map %d, error = %d\n", 1661 sc->sc_dev.dv_xname, idx, error); 1662 panic("ae_add_rxbuf"); /* XXX */ 1663 } 1664 1665 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1666 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1667 1668 AE_INIT_RXDESC(sc, idx); 1669 1670 return (0); 1671 } 1672 1673 /* 1674 * ae_filter_setup: 1675 * 1676 * Set the chip's receive filter. 1677 */ 1678 static void 1679 ae_filter_setup(struct ae_softc *sc) 1680 { 1681 struct ethercom *ec = &sc->sc_ethercom; 1682 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1683 struct ether_multi *enm; 1684 struct ether_multistep step; 1685 uint32_t hash, mchash[2]; 1686 uint32_t macctl = 0; 1687 1688 /* 1689 * If the chip is running, we need to reset the interface, 1690 * and will revisit here (with IFF_RUNNING) clear. The 1691 * chip seems to really not like to have its multicast 1692 * filter programmed without a reset. 
1693 */ 1694 if (ifp->if_flags & IFF_RUNNING) { 1695 (void) ae_init(ifp); 1696 return; 1697 } 1698 1699 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n", 1700 sc->sc_dev.dv_xname, sc->sc_flags)); 1701 1702 macctl = AE_READ(sc, CSR_MACCTL); 1703 macctl &= ~(MACCTL_PR | MACCTL_PM); 1704 macctl |= MACCTL_HASH; 1705 macctl |= MACCTL_HBD; 1706 macctl |= MACCTL_PR; 1707 1708 if (ifp->if_flags & IFF_PROMISC) { 1709 macctl |= MACCTL_PR; 1710 goto allmulti; 1711 } 1712 1713 mchash[0] = mchash[1] = 0; 1714 1715 ETHER_FIRST_MULTI(step, ec, enm); 1716 while (enm != NULL) { 1717 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1718 /* 1719 * We must listen to a range of multicast addresses. 1720 * For now, just accept all multicasts, rather than 1721 * trying to set only those filter bits needed to match 1722 * the range. (At this time, the only use of address 1723 * ranges is for IP multicast routing, for which the 1724 * range is big enough to require all bits set.) 1725 */ 1726 goto allmulti; 1727 } 1728 1729 /* Verify whether we use big or little endian hashes */ 1730 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f; 1731 mchash[hash >> 5] |= 1 << (hash & 0x1f); 1732 ETHER_NEXT_MULTI(step, enm); 1733 } 1734 ifp->if_flags &= ~IFF_ALLMULTI; 1735 goto setit; 1736 1737 allmulti: 1738 ifp->if_flags |= IFF_ALLMULTI; 1739 mchash[0] = mchash[1] = 0xffffffff; 1740 macctl |= MACCTL_PM; 1741 1742 setit: 1743 AE_WRITE(sc, CSR_HTHI, mchash[0]); 1744 AE_WRITE(sc, CSR_HTHI, mchash[1]); 1745 1746 AE_WRITE(sc, CSR_MACCTL, macctl); 1747 AE_BARRIER(sc); 1748 1749 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n", 1750 sc->sc_dev.dv_xname, macctl)); 1751 } 1752 1753 /* 1754 * ae_idle: 1755 * 1756 * Cause the transmit and/or receive processes to go idle. 
1757 */ 1758 void 1759 ae_idle(struct ae_softc *sc, u_int32_t bits) 1760 { 1761 static const char * const txstate_names[] = { 1762 "STOPPED", 1763 "RUNNING - FETCH", 1764 "RUNNING - WAIT", 1765 "RUNNING - READING", 1766 "-- RESERVED --", 1767 "RUNNING - SETUP", 1768 "SUSPENDED", 1769 "RUNNING - CLOSE", 1770 }; 1771 static const char * const rxstate_names[] = { 1772 "STOPPED", 1773 "RUNNING - FETCH", 1774 "RUNNING - CHECK", 1775 "RUNNING - WAIT", 1776 "SUSPENDED", 1777 "RUNNING - CLOSE", 1778 "RUNNING - FLUSH", 1779 "RUNNING - QUEUE", 1780 }; 1781 1782 u_int32_t csr, ackmask = 0; 1783 int i; 1784 1785 if (bits & OPMODE_ST) 1786 ackmask |= STATUS_TPS; 1787 1788 if (bits & OPMODE_SR) 1789 ackmask |= STATUS_RPS; 1790 1791 AE_CLR(sc, CSR_OPMODE, bits); 1792 1793 for (i = 0; i < 1000; i++) { 1794 if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask) 1795 break; 1796 delay(10); 1797 } 1798 1799 csr = AE_READ(sc, CSR_STATUS); 1800 if ((csr & ackmask) != ackmask) { 1801 if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 && 1802 (csr & STATUS_TS) != STATUS_TS_STOPPED) { 1803 printf("%s: transmit process failed to idle: " 1804 "state %s\n", sc->sc_dev.dv_xname, 1805 txstate_names[(csr & STATUS_TS) >> 20]); 1806 } 1807 if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 && 1808 (csr & STATUS_RS) != STATUS_RS_STOPPED) { 1809 printf("%s: receive process failed to idle: " 1810 "state %s\n", sc->sc_dev.dv_xname, 1811 rxstate_names[(csr & STATUS_RS) >> 17]); 1812 } 1813 } 1814 } 1815 1816 /***************************************************************************** 1817 * Support functions for MII-attached media. 1818 *****************************************************************************/ 1819 1820 /* 1821 * ae_mii_tick: 1822 * 1823 * One second timer, used to tick the MII. 
1824 */ 1825 static void 1826 ae_mii_tick(void *arg) 1827 { 1828 struct ae_softc *sc = arg; 1829 int s; 1830 1831 if (!device_is_active(&sc->sc_dev)) 1832 return; 1833 1834 s = splnet(); 1835 mii_tick(&sc->sc_mii); 1836 splx(s); 1837 1838 callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc); 1839 } 1840 1841 /* 1842 * ae_mii_statchg: [mii interface function] 1843 * 1844 * Callback from PHY when media changes. 1845 */ 1846 static void 1847 ae_mii_statchg(device_t self) 1848 { 1849 struct ae_softc *sc = device_private(self); 1850 uint32_t macctl, flowc; 1851 1852 //opmode = AE_READ(sc, CSR_OPMODE); 1853 macctl = AE_READ(sc, CSR_MACCTL); 1854 1855 /* XXX: do we need to do this? */ 1856 /* Idle the transmit and receive processes. */ 1857 //ae_idle(sc, OPMODE_ST|OPMODE_SR); 1858 1859 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1860 flowc = FLOWC_FCE; 1861 macctl &= ~MACCTL_DRO; 1862 macctl |= MACCTL_FDX; 1863 } else { 1864 flowc = 0; /* cannot do flow control in HDX */ 1865 macctl |= MACCTL_DRO; 1866 macctl &= ~MACCTL_FDX; 1867 } 1868 1869 AE_WRITE(sc, CSR_FLOWC, flowc); 1870 AE_WRITE(sc, CSR_MACCTL, macctl); 1871 1872 /* restore operational mode */ 1873 //AE_WRITE(sc, CSR_OPMODE, opmode); 1874 AE_BARRIER(sc); 1875 } 1876 1877 /* 1878 * ae_mii_readreg: 1879 * 1880 * Read a PHY register. 1881 */ 1882 static int 1883 ae_mii_readreg(device_t self, int phy, int reg) 1884 { 1885 struct ae_softc *sc = device_private(self); 1886 uint32_t addr; 1887 int i; 1888 1889 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT); 1890 AE_WRITE(sc, CSR_MIIADDR, addr); 1891 AE_BARRIER(sc); 1892 for (i = 0; i < 100000000; i++) { 1893 if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0) 1894 break; 1895 } 1896 1897 return (AE_READ(sc, CSR_MIIDATA) & 0xffff); 1898 } 1899 1900 /* 1901 * ae_mii_writereg: 1902 * 1903 * Write a PHY register. 
1904 */ 1905 static void 1906 ae_mii_writereg(device_t self, int phy, int reg, int val) 1907 { 1908 struct ae_softc *sc = device_private(self); 1909 uint32_t addr; 1910 int i; 1911 1912 /* write the data register */ 1913 AE_WRITE(sc, CSR_MIIDATA, val); 1914 1915 /* write the address to latch it in */ 1916 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) | 1917 MIIADDR_WRITE; 1918 AE_WRITE(sc, CSR_MIIADDR, addr); 1919 AE_BARRIER(sc); 1920 1921 for (i = 0; i < 100000000; i++) { 1922 if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0) 1923 break; 1924 } 1925 } 1926