1 /* $Id: if_ae.c,v 1.18 2010/01/22 08:56:05 martin Exp $ */ 2 /*- 3 * Copyright (c) 2006 Urbana-Champaign Independent Media Center. 4 * Copyright (c) 2006 Garrett D'Amore. 5 * All rights reserved. 6 * 7 * This code was written by Garrett D'Amore for the Champaign-Urbana 8 * Community Wireless Network Project. 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 3. All advertising materials mentioning features or use of this 20 * software must display the following acknowledgements: 21 * This product includes software developed by the Urbana-Champaign 22 * Independent Media Center. 23 * This product includes software developed by Garrett D'Amore. 24 * 4. Urbana-Champaign Independent Media Center's name and Garrett 25 * D'Amore's name may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT 29 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT 33 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 35 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 40 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 */ 42 /*- 43 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc. 44 * All rights reserved. 45 * 46 * This code is derived from software contributed to The NetBSD Foundation 47 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 48 * NASA Ames Research Center; and by Charles M. Hannum. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 60 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 62 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 69 * POSSIBILITY OF SUCH DAMAGE. 70 */ 71 72 /* 73 * Device driver for the onboard ethernet MAC found on the AR5312 74 * chip's AHB bus. 75 * 76 * This device is very simliar to the tulip in most regards, and 77 * the code is directly derived from NetBSD's tulip.c. However, it 78 * is different enough that it did not seem to be a good idea to 79 * add further complexity to the tulip driver, so we have our own. 80 * 81 * Also tulip has a lot of complexity in it for various parts/options 82 * that we don't need, and on these little boxes with only ~8MB RAM, we 83 * don't want any extra bloat. 84 */ 85 86 /* 87 * TODO: 88 * 89 * 1) Find out about BUS_MODE_ALIGN16B. This chip can apparently align 90 * inbound packets on a half-word boundary, which would make life easier 91 * for TCP/IP. (Aligning IP headers on a word.) 92 * 93 * 2) There is stuff in original tulip to shut down the device when reacting 94 * to a a change in link status. Is that needed. 95 * 96 * 3) Test with variety of 10/100 HDX/FDX scenarios. 
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.18 2010/01/22 08:56:05 martin Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

/*
 * Transmit threshold table: pairs the OPMODE register bits with a
 * human-readable name.  On a transmit underrun, ae_intr() steps to the
 * next entry (sc_txthresh + 1) until it hits the NULL sentinel; the
 * last real entry is store-and-forward mode.
 */
static const struct {
	u_int32_t	txth_opmode;	/* OPMODE bits */
	const char	*txth_name;	/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};

/* Autoconfiguration entry points. */
static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, u_int32_t);

/* ifnet interface functions. */
static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct
ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

/* Interrupt handling. */
static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

/* MII/PHY support. */
static void	ae_mii_tick(void *);
static void	ae_mii_statchg(device_t);

static int	ae_mii_readreg(device_t, int, int);
static void	ae_mii_writereg(device_t, int, int, int);

#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.
 */
int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	/* Match solely on the arbus child device name. */
	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;

}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system: map registers, allocate
 *	and load the DMA control data, create tx/rx DMA maps, reset the
 *	chip, probe the MII, and attach the network interface.  On
 *	failure before the AE_ATTACHED flag is set, all resources
 *	acquired so far are released via the fail_* labels below.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.  It is supplied by the platform as a
	 * "mac-address" device property.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* If no PHY was found, offer only "none"; else default to auto. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 */
int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->sc_rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still attached to the rx ring, then the maps. */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	/* Likewise for any pending transmit mbufs. */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	/* Tear down the control data DMA resources (reverse of attach). */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Pulls packets off
 *	the send queue, loads them into transmit descriptors, and hands
 *	the chain to the chip.  The OWN bit on the first descriptor is
 *	deferred until the whole chain is built, to avoid racing the
 *	hardware.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs, *last_txs = NULL;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags, ifp->if_flags));


	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    sc->sc_dev.dv_xname, ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  Note the hardware also requires word-aligned
		 * buffers, so misaligned packets take the copy path too.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied: free the original, transmit the copy. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		last_txs = txs;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    sc->sc_dev.dv_xname, lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
	}
	else
		printf("%s: spurious watchdog timeout\n", sc->sc_dev.dv_xname);

	(void) ae_init(ifp);

	/* Try to get more packets going.
 */
	ae_start(ifp);
}

/* If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.  Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	/* Bits that differ between the current and last-seen if_flags. */
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	/* Remember the flags for ae_ifflags_cb()'s change detection. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", sc->sc_dev.dv_xname));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", sc->sc_dev.dv_xname);
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	/* Loop until no enabled interrupt condition remains asserted. */
	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			/* Write back to acknowledge the bits we saw. */
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    sc->sc_dev.dv_xname);

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &=
					    ~(OPMODE_TR|OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    sc->sc_dev.dv_xname,
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
 */
			}
		}

		if (status & (STATUS_TPS|STATUS_RPS)) {
			/* A DMA process stopped unexpectedly; reinitialize. */
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    sc->sc_dev.dv_xname);
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    sc->sc_dev.dv_xname);
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    sc->sc_dev.dv_xname, str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 *	Transmit buffer unavailable -- normal
		 *	condition, nothing to do, really.
		 *
		 *	General purpose timer expired -- we don't
		 *	use the general purpose timer.
		 *
		 *	Early receive interrupt -- not available on
		 *	all chips, we just use RI.  We also only
		 *	use single-segment receive DMA, so this
		 *	is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	ae_start(ifp);

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
#endif
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	/* Walk the rx ring from the last-seen position. */
	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		     (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		      ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift by 2 so the IP header ends up word-aligned. */
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if its for us.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
/*
 * ae_txintr:
 *
 *	Reclaim completed transmit jobs: walk the dirty (in-flight) Tx
 *	queue, free the mbufs and DMA maps of frames the chip has sent,
 *	and fold the descriptor status word into the interface statistics.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	u_int32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    sc->sc_dev.dv_xname, sc->sc_flags));

	/* We are about to free descriptors; let the start routine run. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * Sync the job's descriptors so we see the chip's updates.
		 * NOTE(review): the sync window starts at txs_lastdesc, not
		 * txs_firstdesc -- presumably AE_CDTXSYNC wraps around the
		 * ring; confirm it covers the whole chain.
		 */
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		/* Optional dump of the raw descriptor chain for this job. */
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * Stop at the first job whose last descriptor the chip
		 * still owns -- everything after it is still in flight.
		 */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		/* Return the job's descriptors to the free pool. */
		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		/* Underflow and timeout are hard output errors. */
		if (txstat & (ADSTAT_Tx_UF|ADSTAT_Tx_TO))
			ifp->if_oerrors++;

		/*
		 * Excessive collisions: the chip gave up after 16
		 * attempts; otherwise count the attempts it reported.
		 */
		if (txstat & ADSTAT_Tx_EC)
			ifp->if_collisions += 16;
		else
			ifp->if_collisions += ADSTAT_Tx_COLLISIONS(txstat);
		if (txstat & ADSTAT_Tx_LC)
			ifp->if_collisions++;

		ifp->if_opackets++;
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
/*
 * ae_print_stats:
 *
 *	Dump the soft Tx error counters (AE_STATS builds only).
 */
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 * NOTE(review): the delay below is actually 10us, not 2us --
	 * the comment and the code disagree; confirm which is intended.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	/* Poll up to 1000 * 10us for the SWR bit to self-clear. */
	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	delay(1000);
}

/*
 * ae_init: [ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
1313 */ 1314 static int 1315 ae_init(struct ifnet *ifp) 1316 { 1317 struct ae_softc *sc = ifp->if_softc; 1318 struct ae_txsoft *txs; 1319 struct ae_rxsoft *rxs; 1320 const uint8_t *enaddr; 1321 int i, error = 0; 1322 1323 if ((error = ae_enable(sc)) != 0) 1324 goto out; 1325 1326 /* 1327 * Cancel any pending I/O. 1328 */ 1329 ae_stop(ifp, 0); 1330 1331 /* 1332 * Reset the chip to a known state. 1333 */ 1334 ae_reset(sc); 1335 1336 /* 1337 * Initialize the BUSMODE register. 1338 */ 1339 AE_WRITE(sc, CSR_BUSMODE, 1340 /* XXX: not sure if this is a good thing or not... */ 1341 //BUSMODE_ALIGN_16B | 1342 BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW); 1343 AE_BARRIER(sc); 1344 1345 /* 1346 * Initialize the transmit descriptor ring. 1347 */ 1348 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1349 for (i = 0; i < AE_NTXDESC; i++) { 1350 sc->sc_txdescs[i].ad_ctl = 0; 1351 sc->sc_txdescs[i].ad_bufaddr2 = 1352 AE_CDTXADDR(sc, AE_NEXTTX(i)); 1353 } 1354 sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER; 1355 AE_CDTXSYNC(sc, 0, AE_NTXDESC, 1356 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1357 sc->sc_txfree = AE_NTXDESC; 1358 sc->sc_txnext = 0; 1359 1360 /* 1361 * Initialize the transmit job descriptors. 1362 */ 1363 SIMPLEQ_INIT(&sc->sc_txfreeq); 1364 SIMPLEQ_INIT(&sc->sc_txdirtyq); 1365 for (i = 0; i < AE_TXQUEUELEN; i++) { 1366 txs = &sc->sc_txsoft[i]; 1367 txs->txs_mbuf = NULL; 1368 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1369 } 1370 1371 /* 1372 * Initialize the receive descriptor and receive job 1373 * descriptor rings. 1374 */ 1375 for (i = 0; i < AE_NRXDESC; i++) { 1376 rxs = &sc->sc_rxsoft[i]; 1377 if (rxs->rxs_mbuf == NULL) { 1378 if ((error = ae_add_rxbuf(sc, i)) != 0) { 1379 printf("%s: unable to allocate or map rx " 1380 "buffer %d, error = %d\n", 1381 sc->sc_dev.dv_xname, i, error); 1382 /* 1383 * XXX Should attempt to run with fewer receive 1384 * XXX buffers instead of just failing. 
1385 */ 1386 ae_rxdrain(sc); 1387 goto out; 1388 } 1389 } else 1390 AE_INIT_RXDESC(sc, i); 1391 } 1392 sc->sc_rxptr = 0; 1393 1394 /* 1395 * Initialize the interrupt mask and enable interrupts. 1396 */ 1397 /* normal interrupts */ 1398 sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS; 1399 1400 /* abnormal interrupts */ 1401 sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF | 1402 STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS; 1403 1404 sc->sc_rxint_mask = STATUS_RI|STATUS_RU; 1405 sc->sc_txint_mask = STATUS_TI|STATUS_UNF|STATUS_TJT; 1406 1407 sc->sc_rxint_mask &= sc->sc_inten; 1408 sc->sc_txint_mask &= sc->sc_inten; 1409 1410 AE_WRITE(sc, CSR_INTEN, sc->sc_inten); 1411 AE_WRITE(sc, CSR_STATUS, 0xffffffff); 1412 1413 /* 1414 * Give the transmit and receive rings to the chip. 1415 */ 1416 AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext)); 1417 AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr)); 1418 AE_BARRIER(sc); 1419 1420 /* 1421 * Set the station address. 1422 */ 1423 enaddr = CLLADDR(ifp->if_sadl); 1424 AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]); 1425 AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 | 1426 enaddr[1] << 8 | enaddr[0]); 1427 AE_BARRIER(sc); 1428 1429 /* 1430 * Set the receive filter. This will start the transmit and 1431 * receive processes. 1432 */ 1433 ae_filter_setup(sc); 1434 1435 /* 1436 * Set the current media. 1437 */ 1438 if ((error = ether_mediachange(ifp)) != 0) 1439 goto out; 1440 1441 /* 1442 * Start the mac. 1443 */ 1444 AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE); 1445 AE_BARRIER(sc); 1446 1447 /* 1448 * Write out the opmode. 1449 */ 1450 AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST | 1451 ae_txthresh[sc->sc_txthresh].txth_opmode); 1452 /* 1453 * Start the receive process. 1454 */ 1455 AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD); 1456 AE_BARRIER(sc); 1457 1458 if (sc->sc_tick != NULL) { 1459 /* Start the one second clock. 
*/ 1460 callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc); 1461 } 1462 1463 /* 1464 * Note that the interface is now running. 1465 */ 1466 ifp->if_flags |= IFF_RUNNING; 1467 ifp->if_flags &= ~IFF_OACTIVE; 1468 sc->sc_if_flags = ifp->if_flags; 1469 1470 out: 1471 if (error) { 1472 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1473 ifp->if_timer = 0; 1474 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 1475 } 1476 return (error); 1477 } 1478 1479 /* 1480 * ae_enable: 1481 * 1482 * Enable the chip. 1483 */ 1484 static int 1485 ae_enable(struct ae_softc *sc) 1486 { 1487 1488 if (AE_IS_ENABLED(sc) == 0) { 1489 sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq, 1490 ae_intr, sc); 1491 if (sc->sc_ih == NULL) { 1492 printf("%s: unable to establish interrupt\n", 1493 sc->sc_dev.dv_xname); 1494 return (EIO); 1495 } 1496 sc->sc_flags |= AE_ENABLED; 1497 } 1498 return (0); 1499 } 1500 1501 /* 1502 * ae_disable: 1503 * 1504 * Disable the chip. 1505 */ 1506 static void 1507 ae_disable(struct ae_softc *sc) 1508 { 1509 1510 if (AE_IS_ENABLED(sc)) { 1511 arbus_intr_disestablish(sc->sc_ih); 1512 sc->sc_flags &= ~AE_ENABLED; 1513 } 1514 } 1515 1516 /* 1517 * ae_power: 1518 * 1519 * Power management (suspend/resume) hook. 1520 */ 1521 static void 1522 ae_power(int why, void *arg) 1523 { 1524 struct ae_softc *sc = arg; 1525 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1526 int s; 1527 1528 printf("power called: %d, %x\n", why, (uint32_t)arg); 1529 s = splnet(); 1530 switch (why) { 1531 case PWR_STANDBY: 1532 /* do nothing! */ 1533 break; 1534 case PWR_SUSPEND: 1535 ae_stop(ifp, 0); 1536 ae_disable(sc); 1537 break; 1538 case PWR_RESUME: 1539 if (ifp->if_flags & IFF_UP) { 1540 ae_enable(sc); 1541 ae_init(ifp); 1542 } 1543 break; 1544 case PWR_SOFTSUSPEND: 1545 case PWR_SOFTSTANDBY: 1546 case PWR_SOFTRESUME: 1547 break; 1548 } 1549 splx(s); 1550 } 1551 1552 /* 1553 * ae_rxdrain: 1554 * 1555 * Drain the receive queue. 
1556 */ 1557 static void 1558 ae_rxdrain(struct ae_softc *sc) 1559 { 1560 struct ae_rxsoft *rxs; 1561 int i; 1562 1563 for (i = 0; i < AE_NRXDESC; i++) { 1564 rxs = &sc->sc_rxsoft[i]; 1565 if (rxs->rxs_mbuf != NULL) { 1566 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1567 m_freem(rxs->rxs_mbuf); 1568 rxs->rxs_mbuf = NULL; 1569 } 1570 } 1571 } 1572 1573 /* 1574 * ae_stop: [ ifnet interface function ] 1575 * 1576 * Stop transmission on the interface. 1577 */ 1578 static void 1579 ae_stop(struct ifnet *ifp, int disable) 1580 { 1581 struct ae_softc *sc = ifp->if_softc; 1582 struct ae_txsoft *txs; 1583 1584 if (sc->sc_tick != NULL) { 1585 /* Stop the one second clock. */ 1586 callout_stop(&sc->sc_tick_callout); 1587 } 1588 1589 /* Down the MII. */ 1590 mii_down(&sc->sc_mii); 1591 1592 /* Disable interrupts. */ 1593 AE_WRITE(sc, CSR_INTEN, 0); 1594 1595 /* Stop the transmit and receive processes. */ 1596 AE_WRITE(sc, CSR_OPMODE, 0); 1597 AE_WRITE(sc, CSR_RXLIST, 0); 1598 AE_WRITE(sc, CSR_TXLIST, 0); 1599 AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE); 1600 AE_BARRIER(sc); 1601 1602 /* 1603 * Release any queued transmit buffers. 1604 */ 1605 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1606 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1607 if (txs->txs_mbuf != NULL) { 1608 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1609 m_freem(txs->txs_mbuf); 1610 txs->txs_mbuf = NULL; 1611 } 1612 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1613 } 1614 1615 /* 1616 * Mark the interface down and cancel the watchdog timer. 1617 */ 1618 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1619 sc->sc_if_flags = ifp->if_flags; 1620 ifp->if_timer = 0; 1621 1622 if (disable) { 1623 ae_rxdrain(sc); 1624 ae_disable(sc); 1625 } 1626 1627 /* 1628 * Reset the chip (needed on some flavors to actually disable it). 1629 */ 1630 ae_reset(sc); 1631 } 1632 1633 /* 1634 * ae_add_rxbuf: 1635 * 1636 * Add a receive buffer to the indicated descriptor. 
1637 */ 1638 static int 1639 ae_add_rxbuf(struct ae_softc *sc, int idx) 1640 { 1641 struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1642 struct mbuf *m; 1643 int error; 1644 1645 MGETHDR(m, M_DONTWAIT, MT_DATA); 1646 if (m == NULL) 1647 return (ENOBUFS); 1648 1649 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1650 MCLGET(m, M_DONTWAIT); 1651 if ((m->m_flags & M_EXT) == 0) { 1652 m_freem(m); 1653 return (ENOBUFS); 1654 } 1655 1656 if (rxs->rxs_mbuf != NULL) 1657 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1658 1659 rxs->rxs_mbuf = m; 1660 1661 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1662 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1663 BUS_DMA_READ|BUS_DMA_NOWAIT); 1664 if (error) { 1665 printf("%s: can't load rx DMA map %d, error = %d\n", 1666 sc->sc_dev.dv_xname, idx, error); 1667 panic("ae_add_rxbuf"); /* XXX */ 1668 } 1669 1670 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1671 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1672 1673 AE_INIT_RXDESC(sc, idx); 1674 1675 return (0); 1676 } 1677 1678 /* 1679 * ae_filter_setup: 1680 * 1681 * Set the chip's receive filter. 1682 */ 1683 static void 1684 ae_filter_setup(struct ae_softc *sc) 1685 { 1686 struct ethercom *ec = &sc->sc_ethercom; 1687 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1688 struct ether_multi *enm; 1689 struct ether_multistep step; 1690 uint32_t hash, mchash[2]; 1691 uint32_t macctl = 0; 1692 1693 /* 1694 * If the chip is running, we need to reset the interface, 1695 * and will revisit here (with IFF_RUNNING) clear. The 1696 * chip seems to really not like to have its multicast 1697 * filter programmed without a reset. 
1698 */ 1699 if (ifp->if_flags & IFF_RUNNING) { 1700 (void) ae_init(ifp); 1701 return; 1702 } 1703 1704 DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n", 1705 sc->sc_dev.dv_xname, sc->sc_flags)); 1706 1707 macctl = AE_READ(sc, CSR_MACCTL); 1708 macctl &= ~(MACCTL_PR | MACCTL_PM); 1709 macctl |= MACCTL_HASH; 1710 macctl |= MACCTL_HBD; 1711 macctl |= MACCTL_PR; 1712 1713 if (ifp->if_flags & IFF_PROMISC) { 1714 macctl |= MACCTL_PR; 1715 goto allmulti; 1716 } 1717 1718 mchash[0] = mchash[1] = 0; 1719 1720 ETHER_FIRST_MULTI(step, ec, enm); 1721 while (enm != NULL) { 1722 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1723 /* 1724 * We must listen to a range of multicast addresses. 1725 * For now, just accept all multicasts, rather than 1726 * trying to set only those filter bits needed to match 1727 * the range. (At this time, the only use of address 1728 * ranges is for IP multicast routing, for which the 1729 * range is big enough to require all bits set.) 1730 */ 1731 goto allmulti; 1732 } 1733 1734 /* Verify whether we use big or little endian hashes */ 1735 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f; 1736 mchash[hash >> 5] |= 1 << (hash & 0x1f); 1737 ETHER_NEXT_MULTI(step, enm); 1738 } 1739 ifp->if_flags &= ~IFF_ALLMULTI; 1740 goto setit; 1741 1742 allmulti: 1743 ifp->if_flags |= IFF_ALLMULTI; 1744 mchash[0] = mchash[1] = 0xffffffff; 1745 macctl |= MACCTL_PM; 1746 1747 setit: 1748 AE_WRITE(sc, CSR_HTHI, mchash[0]); 1749 AE_WRITE(sc, CSR_HTHI, mchash[1]); 1750 1751 AE_WRITE(sc, CSR_MACCTL, macctl); 1752 AE_BARRIER(sc); 1753 1754 DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n", 1755 sc->sc_dev.dv_xname, macctl)); 1756 } 1757 1758 /* 1759 * ae_idle: 1760 * 1761 * Cause the transmit and/or receive processes to go idle. 
1762 */ 1763 void 1764 ae_idle(struct ae_softc *sc, u_int32_t bits) 1765 { 1766 static const char * const txstate_names[] = { 1767 "STOPPED", 1768 "RUNNING - FETCH", 1769 "RUNNING - WAIT", 1770 "RUNNING - READING", 1771 "-- RESERVED --", 1772 "RUNNING - SETUP", 1773 "SUSPENDED", 1774 "RUNNING - CLOSE", 1775 }; 1776 static const char * const rxstate_names[] = { 1777 "STOPPED", 1778 "RUNNING - FETCH", 1779 "RUNNING - CHECK", 1780 "RUNNING - WAIT", 1781 "SUSPENDED", 1782 "RUNNING - CLOSE", 1783 "RUNNING - FLUSH", 1784 "RUNNING - QUEUE", 1785 }; 1786 1787 u_int32_t csr, ackmask = 0; 1788 int i; 1789 1790 if (bits & OPMODE_ST) 1791 ackmask |= STATUS_TPS; 1792 1793 if (bits & OPMODE_SR) 1794 ackmask |= STATUS_RPS; 1795 1796 AE_CLR(sc, CSR_OPMODE, bits); 1797 1798 for (i = 0; i < 1000; i++) { 1799 if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask) 1800 break; 1801 delay(10); 1802 } 1803 1804 csr = AE_READ(sc, CSR_STATUS); 1805 if ((csr & ackmask) != ackmask) { 1806 if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 && 1807 (csr & STATUS_TS) != STATUS_TS_STOPPED) { 1808 printf("%s: transmit process failed to idle: " 1809 "state %s\n", sc->sc_dev.dv_xname, 1810 txstate_names[(csr & STATUS_TS) >> 20]); 1811 } 1812 if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 && 1813 (csr & STATUS_RS) != STATUS_RS_STOPPED) { 1814 printf("%s: receive process failed to idle: " 1815 "state %s\n", sc->sc_dev.dv_xname, 1816 rxstate_names[(csr & STATUS_RS) >> 17]); 1817 } 1818 } 1819 } 1820 1821 /***************************************************************************** 1822 * Support functions for MII-attached media. 1823 *****************************************************************************/ 1824 1825 /* 1826 * ae_mii_tick: 1827 * 1828 * One second timer, used to tick the MII. 
1829 */ 1830 static void 1831 ae_mii_tick(void *arg) 1832 { 1833 struct ae_softc *sc = arg; 1834 int s; 1835 1836 if (!device_is_active(&sc->sc_dev)) 1837 return; 1838 1839 s = splnet(); 1840 mii_tick(&sc->sc_mii); 1841 splx(s); 1842 1843 callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc); 1844 } 1845 1846 /* 1847 * ae_mii_statchg: [mii interface function] 1848 * 1849 * Callback from PHY when media changes. 1850 */ 1851 static void 1852 ae_mii_statchg(device_t self) 1853 { 1854 struct ae_softc *sc = device_private(self); 1855 uint32_t macctl, flowc; 1856 1857 //opmode = AE_READ(sc, CSR_OPMODE); 1858 macctl = AE_READ(sc, CSR_MACCTL); 1859 1860 /* XXX: do we need to do this? */ 1861 /* Idle the transmit and receive processes. */ 1862 //ae_idle(sc, OPMODE_ST|OPMODE_SR); 1863 1864 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1865 flowc = FLOWC_FCE; 1866 macctl &= ~MACCTL_DRO; 1867 macctl |= MACCTL_FDX; 1868 } else { 1869 flowc = 0; /* cannot do flow control in HDX */ 1870 macctl |= MACCTL_DRO; 1871 macctl &= ~MACCTL_FDX; 1872 } 1873 1874 AE_WRITE(sc, CSR_FLOWC, flowc); 1875 AE_WRITE(sc, CSR_MACCTL, macctl); 1876 1877 /* restore operational mode */ 1878 //AE_WRITE(sc, CSR_OPMODE, opmode); 1879 AE_BARRIER(sc); 1880 } 1881 1882 /* 1883 * ae_mii_readreg: 1884 * 1885 * Read a PHY register. 1886 */ 1887 static int 1888 ae_mii_readreg(device_t self, int phy, int reg) 1889 { 1890 struct ae_softc *sc = device_private(self); 1891 uint32_t addr; 1892 int i; 1893 1894 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT); 1895 AE_WRITE(sc, CSR_MIIADDR, addr); 1896 AE_BARRIER(sc); 1897 for (i = 0; i < 100000000; i++) { 1898 if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0) 1899 break; 1900 } 1901 1902 return (AE_READ(sc, CSR_MIIDATA) & 0xffff); 1903 } 1904 1905 /* 1906 * ae_mii_writereg: 1907 * 1908 * Write a PHY register. 
1909 */ 1910 static void 1911 ae_mii_writereg(device_t self, int phy, int reg, int val) 1912 { 1913 struct ae_softc *sc = device_private(self); 1914 uint32_t addr; 1915 int i; 1916 1917 /* write the data register */ 1918 AE_WRITE(sc, CSR_MIIDATA, val); 1919 1920 /* write the address to latch it in */ 1921 addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) | 1922 MIIADDR_WRITE; 1923 AE_WRITE(sc, CSR_MIIADDR, addr); 1924 AE_BARRIER(sc); 1925 1926 for (i = 0; i < 100000000; i++) { 1927 if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0) 1928 break; 1929 } 1930 } 1931